# Dataset dump (columns: python_code | repo_name | file_path)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.FtrlOptimizer"])
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
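A minimal usage sketch (illustrative; `loss` is assumed to be defined
elsewhere):

```python
opt = tf.compat.v1.train.FtrlOptimizer(learning_rate=0.1)
train_op = opt.minimize(loss)
```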
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="Ftrl",
accum_name=None,
linear_name=None,
l2_shrinkage_regularization_strength=0.0):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less than or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate. See section 3.1 in the
[paper](https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
accum_name: The suffix for the variable that keeps the gradient squared
accumulator. If not present, defaults to name.
linear_name: The suffix for the variable that keeps the linear gradient
accumulator. If not present, defaults to name + "_1".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
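For example (illustrative numbers): with L1 = 0, L2 = 0, L2_shrinkage = 0.1
and lr_t = 1.0, the update above reduces to w_{t+1} = w_t - g_t - 0.2 * w_t.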
When the input is sparse, shrinkage will only happen on the active weights.
Raises:
ValueError: If one of the arguments is invalid.
"""
super(FtrlOptimizer, self).__init__(use_locking, name)
if initial_accumulator_value < 0.0:
raise ValueError(
"initial_accumulator_value %f needs to be positive or zero" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
"l1_regularization_strength %f needs to be positive or zero" %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
"l2_regularization_strength %f needs to be positive or zero" %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"l2_shrinkage_regularization_strength %f needs to be positive"
" or zero" % l2_shrinkage_regularization_strength)
self._learning_rate = learning_rate
self._learning_rate_power = learning_rate_power
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
self._learning_rate_tensor = None
self._learning_rate_power_tensor = None
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
self._l2_shrinkage_regularization_strength_tensor = None
self._accum_name = accum_name
self._linear_name = linear_name
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for v in var_list:
with ops.colocate_with(v):
val = constant_op.constant(
self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
self._zeros_slot(v, "linear", self._linear_name or self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength, name="l2_regularization_strength")
self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_shrinkage_regularization_strength,
name="l2_shrinkage_regularization_strength")
self._learning_rate_power_tensor = ops.convert_to_tensor(
self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.apply_ftrl(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.apply_ftrl_v2(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_ftrl_v2(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
# ---- file: tensorflow/python/training/ftrl.py (repo: tensorflow-master) ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Gradient Descent operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import proximal_gradient_descent
class ProximalGradientDescentOptimizerTest(test.TestCase):
def doTestProximalGradientDescentwithoutRegularization(
self, use_resource=False):
with self.cached_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
else:
var0 = variables.Variable([0.0, 0.0])
var1 = variables.Variable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps of Proximal Gradient Descent.
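# (Without regularization this is plain SGD: each step does w -= 3.0 * grad,
# so var0 ends at -3 * 3.0 * [0.1, 0.2] = [-0.9, -1.8].)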
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-0.9, -1.8]), v0_val)
self.assertAllClose(np.array([-0.09, -0.18]), v1_val)
@test_util.run_deprecated_v1
def testProximalGradientDescentwithoutRegularization(self):
self.doTestProximalGradientDescentwithoutRegularization(use_resource=False)
@test_util.run_deprecated_v1
def testResourceProximalGradientDescentwithoutRegularization(self):
self.doTestProximalGradientDescentwithoutRegularization(use_resource=True)
@test_util.run_deprecated_v1
def testProximalGradientDescentwithoutRegularization2(self):
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 3 steps of Proximal Gradient Descent.
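# (Plain SGD again: var0 = [1.0, 2.0] - 3 * 3.0 * [0.1, 0.2] = [0.1, 0.2] and
# var1 = [4.0, 3.0] - 3 * 3.0 * [0.01, 0.02] = [3.91, 2.82].)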
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([0.1, 0.2]), v0_val)
self.assertAllClose(np.array([3.91, 2.82]), v1_val)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(
1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testProximalGradientDescentWithL1_L2(self):
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps of Proximal Gradient Descent.
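# (With nonzero L1/L2, each step also shrinks the iterate toward zero via the
# proximal operator, hence the small-magnitude values checked below.)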
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)
self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)
def applyOptimizer(self, opt, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[1.0], [2.0]])
var1 = variables.Variable([[3.0], [4.0]])
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1]),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = self.evaluate([var0, var1])
if is_sparse:
self.assertAllClose([[1.0], [2.0]], v0_val)
self.assertAllClose([[3.0], [4.0]], v1_val)
else:
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run the optimizer for a few steps.
for _ in range(steps):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
return v0_val, v1_val
@test_util.run_deprecated_v1
def testEquivSparseGradientDescentwithoutRegularization(self):
with self.cached_session():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
@test_util.run_deprecated_v1
def testEquivGradientDescentwithoutRegularization(self):
with self.cached_session():
val0, val1 = self.applyOptimizer(
proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
# ---- file: tensorflow/python/training/proximal_gradient_descent_test.py (repo: tensorflow-master) ----
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic loop for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.basic_train_loop"])
def basic_train_loop(supervisor,
train_step_fn,
args=None,
kwargs=None,
master=""):
"""Basic loop to train a model.
Calls `train_step_fn` in a loop to train a model. The function is called as:
```python
train_step_fn(session, *args, **kwargs)
```
It is passed a `tf.compat.v1.Session` in addition to `args` and `kwargs`. The
function typically runs one training step in the session.
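Example (a minimal sketch; `train_op` is assumed to be defined elsewhere):

```python
sv = tf.compat.v1.train.Supervisor(logdir="/tmp/train_logs")

def train_step(session):
  session.run(train_op)

tf.compat.v1.train.basic_train_loop(sv, train_step)
```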
Args:
supervisor: `tf.compat.v1.train.Supervisor` to run the training services.
train_step_fn: Callable to execute one training step. Called repeatedly as
`train_step_fn(session, *args, **kwargs)`.
args: Optional positional arguments passed to `train_step_fn`.
kwargs: Optional keyword arguments passed to `train_step_fn`.
master: Master to use to create the training session. Defaults to `""`
which causes the session to be created in the local process.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
should_retry = True
while should_retry:
try:
should_retry = False
with supervisor.managed_session(master) as sess:
while not supervisor.should_stop():
train_step_fn(sess, *args, **kwargs)
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
should_retry = True
# ---- file: tensorflow/python/training/basic_loops.py (repo: tensorflow-master) ----
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Python wrappers around warm-starting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_ops
from tensorflow.python.training import saver as saver_lib
@test_util.run_v1_only('b/120545219')
class LoadAndRemapWrappersTest(test.TestCase):
"""Tests for the functionality of the Python wrappers."""
def setUp(self):
ops.reset_default_graph()
# Create the checkpoint file in a temporary directory.
checkpoint_prefix = os.path.join(self.get_temp_dir(), 'model')
# 0., 1., ..., 79. reshaped into [5, 16].
initializer = init_ops.constant_initializer(
np.reshape(np.linspace(0.0, 79, 5 * 16), (5, 16)))
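# Entry (i, j) of the saved matrix is therefore 16 * i + j; the expected
# matrices in the tests below rely on this.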
with self.cached_session() as sess:
with variable_scope.variable_scope('some_scope'):
variable_scope.get_variable(name='embeddings', shape=[5, 16],
initializer=initializer)
self.evaluate(variables.global_variables_initializer())
saver = saver_lib.Saver()
saver.save(sess, checkpoint_prefix, global_step=5)
self.checkpoint_file = '{}-5'.format(checkpoint_prefix)
# Create the vocabulary files.
self.new_feature_vocab_file = os.path.join(
self.get_temp_dir(), 'new_feature_vocab.txt')
with open(self.new_feature_vocab_file, 'w') as f:
f.write('\n'.join(['zero', 'one', 'two', 'three', 'four']) + '\n')
self.old_feature_vocab_file = os.path.join(
self.get_temp_dir(), 'old_feature_vocab.txt')
with open(self.old_feature_vocab_file, 'w') as f:
f.write('\n'.join(['zero', 'one', 'two', 'three']) + '\n')
self.new_class_vocab_file = os.path.join(
self.get_temp_dir(), 'new_class_vocab.txt')
with open(self.new_class_vocab_file, 'w') as f:
f.write('\n'.join(['MISSING', 'knitting', 'flask', 'eminem']) + '\n')
self.old_class_vocab_file = os.path.join(
self.get_temp_dir(), 'old_class_vocab.txt')
with open(self.old_class_vocab_file, 'w') as f:
f.write('\n'.join(['knitting', 'eminem', 'MISSING']) + '\n')
self.init_val = 42
def _init_val_initializer(shape, dtype=None, partition_info=None):
del dtype, partition_info # Unused by this unit-testing initializer.
return array_ops.tile(
constant_op.constant([[self.init_val]], dtype=dtypes.float32), shape)
self.initializer = _init_val_initializer
def test_load_and_remap_matrix(self):
"""Tests the end-to-end loading / remapping of weights."""
# _load_and_remap_matrix() is the generalized wrapper that takes in row and
# column vocabulary files, calls the relevant remappings, and returns the
# weight matrix. Take this example to be linear multi-class by providing
# both row and column vocabularies.
remapped_matrix = checkpoint_ops._load_and_remap_matrix(
new_row_vocab_file=self.new_feature_vocab_file,
old_row_vocab_file=self.old_feature_vocab_file,
num_rows_to_load=4,
new_col_vocab_file=self.new_class_vocab_file,
old_col_vocab_file=self.old_class_vocab_file,
new_col_vocab_size=4,
old_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
new_row_vocab_offset=1,
initializer=self.initializer,
num_row_oov_buckets=1,
num_col_oov_buckets=1)
# [4 in vocab + 1 OOV features, 4 in vocab + 1 OOV classes]. The row offset
# of 1 means loading starts from the second entry of the new row vocab.
expected_remapped_matrix = np.concatenate(
[
np.reshape([18, 34, 50, self.init_val, self.init_val], [5, 1]),
np.reshape([16, 32, 48, self.init_val, self.init_val], [5, 1]),
np.reshape([self.init_val] * 5, [5, 1]),
np.reshape([17, 33, 49, self.init_val, self.init_val], [5, 1]),
np.reshape([self.init_val] * 5, [5, 1])
],
axis=1)
with self.cached_session():
self.assertAllClose(expected_remapped_matrix,
self.evaluate(remapped_matrix))
def test_load_and_remap_output_layer_weight_initializer_linear(self):
"""Tests for the output layer initializer in the linear multi-class case."""
loading_initializer = (checkpoint_ops._load_and_remap_matrix_initializer(
new_row_vocab_size=5,
new_col_vocab_file=self.new_class_vocab_file,
old_col_vocab_file=self.old_class_vocab_file,
new_col_vocab_size=4,
old_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
new_row_vocab_file=self.new_feature_vocab_file,
old_row_vocab_file=self.old_feature_vocab_file,
num_row_oov_buckets=1,
num_col_oov_buckets=1,
initializer=self.initializer))
# The new weight matrix is of size
# [5 feature vocab + 1 feature OOV, 4 class vocab + 1 class OOV]. Use a
# partitioned variable to confirm that the offset logic works.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, self.init_val, self.init_val], [6, 1]),
np.reshape([0, 16, 32, 48, self.init_val, self.init_val], [6, 1]),
np.reshape([self.init_val] * 6, [6, 1]),
np.reshape([1, 17, 33, 49, self.init_val, self.init_val], [6, 1]),
np.reshape([self.init_val] * 6, [6, 1])
],
axis=1)
remapped_matrix = variable_scope.get_variable(
name='linear/obtained_weight_matrix',
shape=[6, 5],
initializer=loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
def test_load_and_remap_output_layer_weight_initializer_dnn_output(self):
"""Tests for the output layer initializer in the DNN output case."""
loading_initializer = (checkpoint_ops._load_and_remap_matrix_initializer(
new_row_vocab_size=5,
new_col_vocab_file=self.new_class_vocab_file,
old_col_vocab_file=self.old_class_vocab_file,
new_col_vocab_size=4,
old_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
num_col_oov_buckets=1,
initializer=self.initializer))
# The new weight matrix is of size
# [5-sized input layer, 4 class vocab + 1 class OOV].
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, 66], [5, 1]),
np.reshape([0, 16, 32, 48, 64], [5, 1]),
np.reshape([self.init_val] * 5, [5, 1]),
np.reshape([1, 17, 33, 49, 65], [5, 1]),
np.reshape([self.init_val] * 5, [5, 1])
],
axis=1)
remapped_matrix = variable_scope.get_variable(
name='dnn_output/obtained_weight_matrix',
shape=[5, 5],
initializer=loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
def test_initializer_with_oov_only_partition(self):
"""Tests for the output layer initializer where one partition is all OOV."""
loading_initializer = (checkpoint_ops._load_and_remap_matrix_initializer(
new_row_vocab_size=5,
new_col_vocab_file=self.new_class_vocab_file,
old_col_vocab_file=self.old_class_vocab_file,
new_col_vocab_size=4,
old_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
new_row_vocab_file=self.new_feature_vocab_file,
old_row_vocab_file=self.old_feature_vocab_file,
num_row_oov_buckets=5,
num_col_oov_buckets=1,
initializer=self.initializer))
# The new weight matrix is of size
# [5 feature vocab + 5 feature OOV, 4 class vocab + 1 class OOV]. The
# second partition has only OOV.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50] + [self.init_val] * 6, [10, 1]),
np.reshape([0, 16, 32, 48] + [self.init_val] * 6, [10, 1]),
np.reshape([self.init_val] * 10, [10, 1]),
np.reshape([1, 17, 33, 49] + [self.init_val] * 6, [10, 1]),
np.reshape([self.init_val] * 10, [10, 1]),
],
axis=1)
remapped_matrix = variable_scope.get_variable(
name='linear_all_oov/obtained_weight_matrix',
shape=[10, 5],
initializer=loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
def test_load_and_remap_linear_multiclass_initializer_default_init(self):
"""Tests where the zeros_initializer default is used for linear."""
loading_initializer = (checkpoint_ops._load_and_remap_matrix_initializer(
new_row_vocab_size=5,
new_col_vocab_file=self.new_class_vocab_file,
old_col_vocab_file=self.old_class_vocab_file,
new_col_vocab_size=4,
old_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
new_row_vocab_file=self.new_feature_vocab_file,
old_row_vocab_file=self.old_feature_vocab_file,
num_row_oov_buckets=1,
num_col_oov_buckets=1))
# Same as test_initializer_with_oov_only_partition, but with zero
# initialization.
expected_remapped_matrix = np.concatenate(
[
np.reshape([2, 18, 34, 50, 0, 0], [6, 1]),
np.reshape([0, 16, 32, 48, 0, 0], [6, 1]),
np.reshape([0] * 6, [6, 1]),
np.reshape([1, 17, 33, 49, 0, 0], [6, 1]),
np.reshape([0] * 6, [6, 1])
],
axis=1)
remapped_matrix = variable_scope.get_variable(
name='linear_init_fallback/obtained_weight_matrix',
shape=[6, 5],
initializer=loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_matrix,
remapped_matrix.as_tensor().eval())
def test_load_embedding_initializer(self):
"""Tests for the load_embedding_initializer wrapper."""
embedding_loading_initializer = (checkpoint_ops._load_embedding_initializer(
new_vocab_file=self.new_feature_vocab_file,
old_vocab_file=self.old_feature_vocab_file,
new_vocab_size=5,
embedding_dim=16,
embedding_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
num_oov_buckets=1,
initializer=self.initializer))
# The new weight matrix is of size
# [5 feature vocab + 1 feature OOV, 16 (embedding dimension)], where the
# last vocab row (2nd last row) is newly initialized (wasn't found in
# previous vocab) and the actual last row is OOV and also newly initialized.
# Use a partitioned variable to confirm that the offset logic works.
expected_remapped_embeddings = np.concatenate(
[
np.reshape(range(64), [4, 16]),
np.reshape([self.init_val] * 32, [2, 16]),
],
axis=0)
remapped_embeddings = variable_scope.get_variable(
name='embedding/obtained_embedding_matrix',
shape=[6, 16],
initializer=embedding_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
def test_load_embedding_initializer_large_oov(self):
"""Tests for the large OOV case for load_embedding_initializer wrapper."""
self.new_feature_vocab_file = os.path.join(
self.get_temp_dir(), 'new_feature_vocab.txt')
with open(self.new_feature_vocab_file, 'w') as f:
f.write('\n'.join(['one', 'zero', 'two', 'four']) + '\n')
# Checkpoint has 5 entries, 3 of which correspond to OOV.
self.old_feature_vocab_file = os.path.join(
self.get_temp_dir(), 'old_feature_vocab.txt')
with open(self.old_feature_vocab_file, 'w') as f:
f.write('\n'.join(['zero', 'one']) + '\n')
embedding_loading_initializer = (checkpoint_ops._load_embedding_initializer(
new_vocab_file=self.new_feature_vocab_file,
old_vocab_file=self.old_feature_vocab_file,
new_vocab_size=4,
embedding_dim=16,
embedding_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
num_oov_buckets=5,
initializer=self.initializer))
# The new weight matrix is of size
# [4 feature vocab + 5 feature OOV, 16 (embedding dimension)], where the
# 3rd and 4th rows are not found in the old vocabulary and therefore newly
# initialized. The last five rows are OOV and also newly initialized.
# Use a partitioned variable to confirm that the offset logic works.
expected_remapped_embeddings = np.concatenate(
[
np.reshape(range(16, 32), [1, 16]),
np.reshape(range(16), [1, 16]),
np.reshape([self.init_val] * 112, [7, 16]),
],
axis=0)
remapped_embeddings = variable_scope.get_variable(
name='embedding/obtained_embedding_matrix',
shape=[9, 16],
initializer=embedding_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
def test_load_embedding_initializer_old_row_vocab(self):
"""Tests for load_embedding_initializer where we constrain old vocab."""
embedding_loading_initializer = (
checkpoint_ops._load_embedding_initializer(
new_vocab_file=self.new_feature_vocab_file,
old_vocab_file=self.old_feature_vocab_file,
# Considered old vocabulary becomes ['zero', 'one', 'two']. This
# means 'three' in the new vocabulary is newly initialized.
old_vocab_size=3,
new_vocab_size=5,
embedding_dim=16,
embedding_tensor_name='some_scope/embeddings',
ckpt_path=[self.checkpoint_file],
num_oov_buckets=1,
initializer=self.initializer))
# The new weight matrix is of size
# [5 feature vocab + 1 feature OOV, 16 (embedding dimension)], where the
# last vocab row (2nd last row) is newly initialized (wasn't found in
# previous vocab) and the actual last row is OOV and also newly initialized.
# Use a partitioned variable to confirm that the offset logic works.
expected_remapped_embeddings = np.concatenate(
[
np.reshape(range(48), [3, 16]),
np.reshape([self.init_val] * 48, [3, 16]),
],
axis=0)
remapped_embeddings = variable_scope.get_variable(
name='embedding/obtained_embedding_matrix',
shape=[6, 16],
initializer=embedding_loading_initializer,
partitioner=partitioned_variables.fixed_size_partitioner(2))
with self.cached_session():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(expected_remapped_embeddings,
remapped_embeddings.as_tensor().eval())
if __name__ == '__main__':
test.main()
# ---- file: tensorflow/python/training/checkpoint_ops_test.py (repo: tensorflow-master) ----
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated, please use ../distribute/distribution_strategy_context.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python.distribute.distribution_strategy_context import *
# ---- file: tensorflow/python/training/distribution_strategy_context.py (repo: tensorflow-master) ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Utility classes for testing checkpointing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.training import saver as saver_module
class CheckpointedOp(object):
"""Op with a custom checkpointing implementation.
Defined as part of the test because the MutableHashTable Python code is
currently in contrib.
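Typical usage in tests (a sketch): construct a `CheckpointedOp`, call
`insert()` to populate the table, save it with a `Saver`, then restore into a
fresh op and compare `keys()`/`values()`.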
"""
# pylint: disable=protected-access
def __init__(self, name, table_ref=None):
if table_ref is None:
self.table_ref = gen_lookup_ops.mutable_hash_table_v2(
key_dtype=dtypes.string, value_dtype=dtypes.float32, name=name)
else:
self.table_ref = table_ref
self._name = name
if not context.executing_eagerly():
self._saveable = CheckpointedOp.CustomSaveable(self, name)
ops_lib.add_to_collection(ops_lib.GraphKeys.SAVEABLE_OBJECTS,
self._saveable)
@property
def name(self):
return self._name
@property
def saveable(self):
if context.executing_eagerly():
return CheckpointedOp.CustomSaveable(self, self.name)
else:
return self._saveable
def insert(self, keys, values):
return gen_lookup_ops.lookup_table_insert_v2(self.table_ref, keys, values)
def lookup(self, keys, default):
return gen_lookup_ops.lookup_table_find_v2(self.table_ref, keys, default)
def keys(self):
return self._export()[0]
def values(self):
return self._export()[1]
def _export(self):
return gen_lookup_ops.lookup_table_export_v2(self.table_ref, dtypes.string,
dtypes.float32)
class CustomSaveable(saver_module.BaseSaverBuilder.SaveableObject):
"""A custom saveable for CheckpointedOp."""
def __init__(self, table, name):
tensors = table._export()
specs = [
saver_module.BaseSaverBuilder.SaveSpec(tensors[0], "",
name + "-keys"),
saver_module.BaseSaverBuilder.SaveSpec(tensors[1], "",
name + "-values")
]
super(CheckpointedOp.CustomSaveable, self).__init__(table, specs, name)
def restore(self, restore_tensors, shapes):
return gen_lookup_ops.lookup_table_import_v2(
self.op.table_ref, restore_tensors[0], restore_tensors[1])
# pylint: enable=protected-access
# ---- file: tensorflow/python/training/saver_test_utils.py (repo: tensorflow-master) ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdagradOptimizer"])
class AdagradOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adagrad algorithm.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
or this
[intro](https://ppasupat.github.io/a9online/uploads/proximal_notes.pdf).
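Conceptually, the per-variable update implemented by the `apply_adagrad` op
is (sketch):

```python
accum += grad * grad
var -= learning_rate * grad / sqrt(accum)
```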
"""
def __init__(self, learning_rate, initial_accumulator_value=0.1,
use_locking=False, name="Adagrad"):
"""Construct a new Adagrad optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adagrad".
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
@compatibility(eager)
When eager execution is enabled, `learning_rate` can be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
@end_compatibility
"""
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value must be positive: %s" %
initial_accumulator_value)
super(AdagradOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_accumulator_value = initial_accumulator_value
# Created lazily in _prepare().
self._learning_rate_tensor = None
def _create_slots(self, var_list):
for v in var_list:
dtype = v.dtype.base_dtype
if v.get_shape().is_fully_defined():
init = init_ops.constant_initializer(self._initial_accumulator_value,
dtype=dtype)
else:
init = self._init_constant_op(v, dtype)
self._get_or_make_slot_with_initializer(v, init, v.get_shape(), dtype,
"accumulator", self._name)
def _init_constant_op(self, v, dtype):
def init():
# Use a Tensor instead of initializer if variable does not have
# static shape.
init_constant = gen_array_ops.fill(array_ops.shape(v),
self._initial_accumulator_value)
return math_ops.cast(init_constant, dtype)
return init
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
def _apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
acc = self.get_slot(var, "accumulator")
return training_ops.sparse_apply_adagrad(
var,
acc,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
acc = self.get_slot(var, "accumulator")
return training_ops.resource_sparse_apply_adagrad(
var.handle,
acc.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
# ---- file: tensorflow/python/training/adagrad.py (repo: tensorflow-master) ----
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Device function for replicated training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util.tf_export import tf_export
# This is a tuple of PS ops used by tf.estimator.Estimator which should work in
# almost all cases.
STANDARD_PS_OPS = ("Variable", "VariableV2", "AutoReloadVariable",
"MutableHashTable", "MutableHashTableV2",
"MutableHashTableOfTensors", "MutableHashTableOfTensorsV2",
"MutableDenseHashTable", "MutableDenseHashTableV2",
"VarHandleOp", "BoostedTreesEnsembleResourceHandleOp")
class _RoundRobinStrategy(object):
"""Returns the next ps task index for placement in round-robin order.
This class is not to be used directly by users. See instead
`replica_device_setter()` below.
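For example (illustrative), with `num_tasks=3` successive calls return task
indices 0, 1, 2, 0, 1, ...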
"""
def __init__(self, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
"""
self._num_tasks = num_tasks
self._next_task = 0
def __call__(self, unused_op):
"""Choose a ps task index for the given `Operation`.
Args:
unused_op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`, in the range
`[0, num_tasks)`.
"""
task = self._next_task
self._next_task = (self._next_task + 1) % self._num_tasks
return task
class _ReplicaDeviceChooser(object):
"""Class to choose devices for Ops in a replicated training setup.
This class is not to be used directly by users. See instead
`replica_device_setter()` below.
"""
def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops,
ps_strategy):
"""Create a new `_ReplicaDeviceChooser`.
Args:
ps_tasks: Number of tasks in the `ps` job.
ps_device: String. Name of the `ps` job.
worker_device: String. Name of the `worker` job.
merge_devices: Boolean. Set to True to allow merging of device specs.
ps_ops: List of strings representing `Operation` types that need to be
placed on `ps` devices.
ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by
`ps_ops`), that takes the `Operation` and returns the ps task index to
use.
"""
self._ps_tasks = ps_tasks
self._ps_device = ps_device
self._worker_device = worker_device
self._merge_devices = merge_devices
self._ps_ops = ps_ops
self._ps_strategy = ps_strategy
def device_function(self, op):
"""Choose a device for `op`.
Args:
op: an `Operation`.
Returns:
The device to use for the `Operation`.
"""
# If we don't return early here, either merge_devices is True, or op.device
# is empty (in which case merging is a no-op). So we can always merge below.
if not self._merge_devices and op.device:
return op.device
current_device = pydev.DeviceSpec.from_string(op.device or "")
# The ps_device will be used for specified ops (ps_ops) whenever it is
# present and ps_tasks is non-zero. However, its task number will only be
# set (using ps_strategy) if there is a job field in ps_device that won't be
# changed by the job field (if present) in current_device.
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if self._ps_tasks and self._ps_device and node_def.op in self._ps_ops:
ps_device = pydev.DeviceSpec.from_string(self._ps_device)
current_job, ps_job = current_device.job, ps_device.job
if ps_job and (not current_job or current_job == ps_job):
ps_device = ps_device.replace(task=self._ps_strategy(op))
ps_device = ps_device.make_merged_spec(current_device)
return ps_device.to_string()
worker_device = pydev.DeviceSpec.from_string(self._worker_device or "")
worker_device = worker_device.make_merged_spec(current_device)
return worker_device.to_string()
@tf_export(v1=["train.replica_device_setter"])
def replica_device_setter(ps_tasks=0,
ps_device="/job:ps",
worker_device="/job:worker",
merge_devices=True,
cluster=None,
ps_ops=None,
ps_strategy=None):
"""Return a `device function` to use when building a Graph for replicas.
Device functions are used in `with tf.device(device_function):` statements to
automatically assign devices to `Operation` objects as they are constructed.
Device constraints are added from the inner-most context first, working
outwards. The merging behavior adds constraints to fields that are yet unset
by a more inner context. Currently the fields are (job, task, cpu/gpu).
If `cluster` is `None`, and `ps_tasks` is 0, the returned function is a no-op.
Otherwise, the value of `ps_tasks` is derived from `cluster`.
By default, only Variable ops are placed on ps tasks, and the placement
strategy is round-robin over all ps tasks. A custom `ps_strategy` may be used
to do more intelligent placement, such as
`tf.contrib.training.GreedyLoadBalancingStrategy`.
For example,
```python
# To build a cluster with two ps jobs on hosts ps0 and ps1, and 3 worker
# jobs on hosts worker0, worker1 and worker2.
cluster_spec = {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
with
tf.device(tf.compat.v1.train.replica_device_setter(cluster=cluster_spec)):
# Build your graph
v1 = tf.Variable(...) # assigned to /job:ps/task:0
v2 = tf.Variable(...) # assigned to /job:ps/task:1
v3 = tf.Variable(...) # assigned to /job:ps/task:0
# Run compute
```
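A custom `ps_strategy` is simply a callable mapping an `Operation` to a ps
task index. An illustrative (hypothetical) example:

```python
def hash_strategy(op):
  # Spread ps-placed ops across 2 ps tasks, keyed by op name.
  return hash(op.name) % 2
```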
Args:
ps_tasks: Number of tasks in the `ps` job. Ignored if `cluster` is
provided.
ps_device: String. Device of the `ps` job. If empty no `ps` job is used.
Defaults to `/job:ps`.
worker_device: String. Device of the `worker` job. If empty no `worker`
job is used.
merge_devices: `Boolean`. If `True`, merges device specifications rather
than overriding them: a device field is only set when the corresponding
constraint is completely unset.
cluster: `ClusterDef` proto or `ClusterSpec`.
ps_ops: List of strings representing `Operation` types that need to be
placed on `ps` devices. If `None`, defaults to `STANDARD_PS_OPS`.
ps_strategy: A callable invoked for every ps `Operation` (i.e. matched by
`ps_ops`), that takes the `Operation` and returns the ps task index to
use. If `None`, defaults to a round-robin strategy across all `ps`
devices.
Returns:
A function to pass to `tf.device()`.
Raises:
TypeError: If `cluster` is not a dictionary or `ClusterDef` protocol buffer,
or if `ps_strategy` is provided but not callable.
"""
if cluster is not None:
if isinstance(cluster, server_lib.ClusterSpec):
cluster_spec = cluster.as_dict()
else:
cluster_spec = server_lib.ClusterSpec(cluster).as_dict()
# Get ps_job_name from ps_device by stripping "/job:".
ps_job_name = pydev.DeviceSpec.from_string(ps_device).job
if ps_job_name not in cluster_spec or cluster_spec[ps_job_name] is None:
return None
ps_tasks = len(cluster_spec[ps_job_name])
if ps_tasks == 0:
return None
if ps_ops is None:
# TODO(sherrym): Variables in the LOCAL_VARIABLES collection should not be
# placed in the parameter server.
ps_ops = list(STANDARD_PS_OPS)
if not merge_devices:
logging.warning(
"DEPRECATION: It is recommended to set merge_devices=true in "
"replica_device_setter")
if ps_strategy is None:
ps_strategy = _RoundRobinStrategy(ps_tasks)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
chooser = _ReplicaDeviceChooser(ps_tasks, ps_device, worker_device,
merge_devices, ps_ops, ps_strategy)
return chooser.device_function
# ---- file: tensorflow/python/training/device_setter.py (repo: tensorflow-master) ----
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import sys
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import function_utils
from tensorflow.python.util.tf_export import tf_export
# The list of exceptions that we should recover from. Exceptions not in this
# list may terminate the job.
_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)
# Value that indicates no value was provided.
USE_DEFAULT = object()
@tf_export(v1=['train.Scaffold'])
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.compat.v1.train.Saver` object taking care of saving the
variables.
Picked from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph by default.
* `ready_for_local_init_op`: An op to verify that global state has been
initialized and it is alright to run `local_init_op`. Picked from and
stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by
default. This is needed when the initialization of local variables depends
on the values of global variables.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph by default.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph by default.
You can also pass the following additional pieces to the constructor:
* `init_feed_dict`: A session feed dictionary that should be used when
running the init op.
* `init_fn`: A callable to run after the init op to perform additional
initializations. The callable will be called as
`init_fn(scaffold, session)`.
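A minimal usage sketch (illustrative):

```python
scaffold = tf.compat.v1.train.Scaffold()
with tf.compat.v1.train.MonitoredTrainingSession(scaffold=scaffold) as sess:
  ...  # run training steps
```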
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=None,
summary_op=None,
saver=None,
copy_from_scaffold=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty 1D string tensor when the variables are initialized, or
a non-empty 1D string tensor listing the names of the non-initialized
variables.
ready_for_local_init_op: Optional op to verify that the global variables
are initialized and `local_init_op` can be run. Must return an empty 1D
string tensor when the global variables are initialized, or a non-empty
1D string tensor listing the names of the non-initialized global
variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.compat.v1.train.Saver` object to use to save and
restore variables. May also be a `tf.train.Checkpoint` object, in which
case object-based checkpoints are saved. This will also load some
object-based checkpoints saved from elsewhere, but that loading may be
fragile since it uses fixed keys rather than performing a full
      graph-based match. For example, if a variable has two paths from the
`Checkpoint` object because two `Model` objects share the `Layer` object
that owns it, removing one `Model` may change the keys and break
checkpoint loading through this API, whereas a graph-based match would
match the variable through the other `Model`.
copy_from_scaffold: Optional scaffold object to copy fields from. Its
fields will be overwritten by the provided fields in this function.
"""
if copy_from_scaffold is not None:
if not isinstance(copy_from_scaffold, Scaffold):
raise TypeError('copy_from_scaffold is not a Scaffold instance.')
      # We need coalesce since Tensor is not converted to bool automatically,
      # so the common idiom of (a or b) does not work.
coalesce = lambda a, b: a if a is not None else b
init_op = coalesce(init_op, copy_from_scaffold.init_op)
init_feed_dict = coalesce(init_feed_dict,
copy_from_scaffold.init_feed_dict)
# Use the original init_fn provided by the user to init the new Scaffold.
init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access
ready_op = coalesce(ready_op, copy_from_scaffold.ready_op)
ready_for_local_init_op = coalesce(
ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op)
local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op)
summary_op = coalesce(summary_op, copy_from_scaffold.summary_op)
saver = coalesce(saver, copy_from_scaffold.saver)
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
self._user_init_fn = init_fn
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._init_feed_dict = init_feed_dict
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default('init_op', ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
self._ready_op = Scaffold.get_or_default('ready_op',
ops.GraphKeys.READY_OP,
default_ready_op)
if self._ready_for_local_init_op is None:
def default_ready_for_local_init_op():
return array_ops.concat([
variables.report_uninitialized_variables(
variables.global_variables()),
resources.report_uninitialized_resources(
resources.shared_resources())
], 0)
self._ready_for_local_init_op = Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold.default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access
# pylint: enable=g-long-lambda
if isinstance(self._saver, trackable_util.Checkpoint):
self._saver = training_saver.Saver(
var_list=graph_view.ObjectGraphView(
self._saver).frozen_saveable_objects(),
sharded=True)
else:
self._saver.build()
ops.get_default_graph().finalize()
logging.info('Graph was finalized.')
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
raise RuntimeError(
'More than one item in the collection "%s". '
'Please indicate which one to use by passing it to '
'the tf.Scaffold constructor as: '
            'tf.Scaffold(%s=item to use)' % (collection_key, arg_name))
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def default_local_init_op():
"""Returns an op that groups the default local init ops.
This op is used during session initialization when a Scaffold is
initialized without specifying the local_init_op arg. It includes
`tf.compat.v1.local_variables_initializer`,
`tf.compat.v1.tables_initializer`, and also
initializes local session resources.
Returns:
The default Scaffold local init op.
"""
return control_flow_ops.group(
variables.local_variables_initializer(),
lookup_ops.tables_initializer(),
resources.initialize_resources(resources.local_resources()))
def _create_monitored_session_with_worker_context(
worker_context, # pylint: disable=missing-docstring
scaffold,
checkpoint_dir=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
save_summaries_secs=None,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=None,
summary_dir=None):
all_hooks = []
if hooks:
all_hooks.extend(hooks)
if chief_only_hooks and worker_context.is_chief:
all_hooks.extend(chief_only_hooks)
# We need to call save or summary ops on all workers since these ops may
# contain collective ops, only running save ops on some workers would make
# collective ops hang. Therefore on those workers that don't need to actually
# write checkpoints or summaries, we let them write to a temp directory.
# pylint: disable=protected-access
if type(
worker_context._strategy).__name__ in ('CollectiveAllReduceStrategy',
'CollectiveAllReduceStrategyV1',
'MultiWorkerMirroredStrategy'):
if worker_context.task_type:
tmpdir = 'tmp_%s_%d' % (worker_context.task_type, worker_context.task_id)
else:
tmpdir = 'tmp'
if save_checkpoint_secs:
logging.warning('Collective ops may deadlock with '
                      '`save_checkpoint_secs`; please use '
'`save_checkpoint_steps` instead. Clearing '
'`save_checkpoint_secs` and setting '
'`save_checkpoint_steps` to 1000 now.')
save_checkpoint_secs = None
save_checkpoint_steps = 1000
if save_summaries_secs:
      logging.warning('Collective ops may run out of sync with '
                      '`save_summaries_secs`; please use '
                      '`save_summaries_steps` instead.')
else:
tmpdir = None
summary_dir = summary_dir or checkpoint_dir
if summary_dir and log_step_count_steps and log_step_count_steps > 0:
if worker_context.should_save_summary:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=os.path.join(summary_dir, tmpdir),
every_n_steps=log_step_count_steps))
if (((save_summaries_steps and save_summaries_steps > 0) or
(save_summaries_secs and save_summaries_secs > 0)) and summary_dir):
if worker_context.should_save_summary:
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=os.path.join(summary_dir, tmpdir)))
if (((save_checkpoint_secs and save_checkpoint_secs > 0) or
(save_checkpoint_steps and save_checkpoint_steps > 0)) and
checkpoint_dir):
if worker_context.should_checkpoint:
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
elif tmpdir:
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
os.path.join(checkpoint_dir, tmpdir),
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
logging.info('all_hooks %r', all_hooks)
session_creator = worker_context.session_creator(
scaffold,
config=config,
checkpoint_dir=checkpoint_dir,
max_wait_secs=max_wait_secs)
return MonitoredSession(
session_creator=session_creator,
hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.MonitoredTrainingSession'])
def MonitoredTrainingSession(
master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=USE_DEFAULT,
save_summaries_steps=USE_DEFAULT,
save_summaries_secs=USE_DEFAULT,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100,
max_wait_secs=7200,
save_checkpoint_steps=USE_DEFAULT,
summary_dir=None):
"""Creates a `MonitoredSession` for training.
  For a chief, this utility sets the proper session initializer/restorer. It
  also creates hooks related to checkpoint and summary saving. For workers, it
  sets the proper session creator, which waits for the chief to
  initialize/restore the session. See `tf.compat.v1.train.MonitoredSession`
  for more information.
Args:
master: `String` the TensorFlow master to use.
    is_chief: If `True`, it will take care of initialization and recovery of
      the underlying TensorFlow session. If `False`, it will wait on a chief
      to initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If not
specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
    chief_only_hooks: list of `SessionRunHook` objects. These hooks are
      activated if `is_chief==True` and ignored otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If both `save_checkpoint_steps` and
`save_checkpoint_secs` are set to `None`, then the default checkpoint
saver isn't used. If both are provided, then only `save_checkpoint_secs`
is used. Default 600.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
    save_summaries_secs: The frequency, in seconds, that the summaries are
      written to disk using a default summary saver. If both
      `save_summaries_steps` and `save_summaries_secs` are set to `None`, then
      the default summary saver isn't used. Default not enabled.
config: an instance of `tf.compat.v1.ConfigProto` proto used to configure
the session. It's the `config` argument of constructor of
`tf.compat.v1.Session`.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec is logged.
max_wait_secs: Maximum time workers should wait for the session to become
available. This should be kept relatively short to help detect incorrect
code, but sometimes may need to be increased if the chief takes a while to
start up.
save_checkpoint_steps: The frequency, in number of global steps, that a
checkpoint is saved using a default checkpoint saver. If both
`save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then
the default checkpoint saver isn't used. If both are provided, then only
`save_checkpoint_secs` is used. Default not enabled.
summary_dir: A string. Optional path to a directory where to save
summaries. If None, checkpoint_dir is used instead.
Returns:
A `MonitoredSession` object.
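  For example, a minimal chief training loop might look like the following
  sketch (the checkpoint directory, step counts, and the trivial `train_op`
  are illustrative):
  ```python
  import tensorflow.compat.v1 as tf
  tf.disable_v2_behavior()  # needed when running under TF2
  global_step = tf.train.get_or_create_global_step()
  train_op = tf.assign_add(global_step, 1)  # stands in for a real train op
  hooks = [tf.train.StopAtStepHook(last_step=500)]
  with tf.train.MonitoredTrainingSession(
      checkpoint_dir='/tmp/train_logs',  # illustrative path
      hooks=hooks,
      save_checkpoint_steps=100,
      log_step_count_steps=10) as sess:
    while not sess.should_stop():
      sess.run(train_op)
  ```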
"""
if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:
save_summaries_steps = 100
save_summaries_secs = None
elif save_summaries_secs == USE_DEFAULT:
save_summaries_secs = None
elif save_summaries_steps == USE_DEFAULT:
save_summaries_steps = None
if (save_checkpoint_steps == USE_DEFAULT and
save_checkpoint_secs == USE_DEFAULT):
save_checkpoint_steps = None
save_checkpoint_secs = 600
elif save_checkpoint_secs == USE_DEFAULT:
save_checkpoint_secs = None
elif save_checkpoint_steps == USE_DEFAULT:
save_checkpoint_steps = None
scaffold = scaffold or Scaffold()
worker_context = distribute_coordinator_context.get_current_worker_context()
if worker_context:
return _create_monitored_session_with_worker_context(
worker_context,
scaffold,
checkpoint_dir=checkpoint_dir,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
save_summaries_secs=save_summaries_secs,
config=config,
stop_grace_period_secs=stop_grace_period_secs,
log_step_count_steps=log_step_count_steps,
max_wait_secs=max_wait_secs,
save_checkpoint_steps=save_checkpoint_steps,
summary_dir=summary_dir)
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold,
master=master,
config=config,
max_wait_secs=max_wait_secs)
return MonitoredSession(
session_creator=session_creator,
hooks=hooks or [],
stop_grace_period_secs=stop_grace_period_secs)
all_hooks = []
if chief_only_hooks:
all_hooks.extend(chief_only_hooks)
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
summary_dir = summary_dir or checkpoint_dir
if summary_dir:
if log_step_count_steps and log_step_count_steps > 0:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=summary_dir, every_n_steps=log_step_count_steps))
if (save_summaries_steps and
save_summaries_steps > 0) or (save_summaries_secs and
save_summaries_secs > 0):
all_hooks.append(
basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=summary_dir))
if checkpoint_dir:
if (save_checkpoint_secs and
save_checkpoint_secs > 0) or (save_checkpoint_steps and
save_checkpoint_steps > 0):
all_hooks.append(
basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir,
save_steps=save_checkpoint_steps,
save_secs=save_checkpoint_secs,
scaffold=scaffold))
if hooks:
all_hooks.extend(hooks)
return MonitoredSession(
session_creator=session_creator,
hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.SessionCreator'])
@six.add_metaclass(abc.ABCMeta)
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
@tf_export(v1=['train.ChiefSessionCreator'])
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.compat.v1.Session for a chief."""
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
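    For example (a minimal sketch; the checkpoint directory is illustrative):
    ```python
    session_creator = tf.compat.v1.train.ChiefSessionCreator(
        checkpoint_dir='/tmp/train_logs')
    with tf.compat.v1.train.MonitoredSession(
        session_creator=session_creator) as sess:
      pass  # run training ops here
    ```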
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
@tf_export(v1=['train.WorkerSessionCreator'])
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.compat.v1.Session for a worker."""
def __init__(self,
scaffold=None,
master='',
config=None,
max_wait_secs=30 * 60):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
max_wait_secs: Maximum time to wait for the session to become available.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
self._max_wait_secs = max_wait_secs
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config, max_wait_secs=self._max_wait_secs)
class _MonitoredSession(object):
"""See `MonitoredSession` or `SingularMonitoredSession`."""
def __init__(self,
session_creator,
hooks,
should_recover,
stop_grace_period_secs=120):
"""Sets up a Monitored or Hooked Session.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` or a `WorkerSessionCreator`.
      hooks: An iterable of `SessionRunHook` objects.
should_recover: A bool. Indicates whether to recover from `AbortedError`
and `UnavailableError` or not.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
self._graph_was_finalized = ops.get_default_graph().finalized
self._hooks = hooks or []
for h in self._hooks:
h.begin()
worker_context = distribute_coordinator_context.get_current_worker_context()
if not session_creator and worker_context:
session_creator = worker_context.session_creator()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks,
stop_grace_period_secs=stop_grace_period_secs)
if should_recover:
self._sess = _RecoverableSession(self._coordinated_creator)
else:
self._sess = self._coordinated_creator.create_session()
@property
def graph(self):
"""The graph that was launched in this session."""
if self._tf_sess() is None:
return None
return self._tf_sess().graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def run_step_fn(self, step_fn):
"""Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to perform
computations with access to a raw session. The returned value of the
`step_fn` will be returned from `run_step_fn`, unless a stop is
requested. In that case, the next `should_stop` call will return True.
    Example usage:
    ```python
    with tf.Graph().as_default():
      c = tf.compat.v1.placeholder(dtypes.float32)
      v = tf.add(c, 4.0)
      w = tf.add(c, 0.5)
      def step_fn(step_context):
        a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
        if a <= 4.5:
          step_context.request_stop()
        return step_context.run_with_hooks(fetches=w, feed_dict={c: 0.1})
      with tf.MonitoredSession() as session:
        while not session.should_stop():
          a = session.run_step_fn(step_fn)
    ```
    Hooks interact with the `run_with_hooks()` call inside the `step_fn`
    as they do with a `MonitoredSession.run` call.
Returns:
Returns the returned value of `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object.
"""
step_fn_arguments = function_utils.fn_args(step_fn)
if step_fn_arguments != ('step_context',) and step_fn_arguments != (
'self',
'step_context',
):
raise ValueError(
'`step_fn` may either have one `step_context` argument, or'
' `self` and `step_context` arguments if it\'s an instance'
' method. Got {} instead.'.format(step_fn_arguments))
# `self._sess` is either `_RecoverableSession` or a `_CoordinatedSession`.
# Setting `run_with_hooks` to `None` will cause `run_with_hooks` to be
# `_CoordinatedSession.run` downstream in either case. This allows
    # `_PREEMPTION_ERRORS` to propagate from within `step_fn` to
# `_RecoverableSession.run_step_fn`.
return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None)
class StepContext(object):
"""Control flow instrument for the `step_fn` from `run_step_fn()`.
Users of `step_fn` may perform `run()` calls without running hooks
by accessing the `session`. A `run()` call with hooks may be performed
using `run_with_hooks()`. Computation flow can be interrupted using
`request_stop()`.
"""
def __init__(self, session, run_with_hooks_fn):
"""Initializes the `step_context` argument for a `step_fn` invocation.
Args:
session: An instance of `tf.compat.v1.Session`.
run_with_hooks_fn: A function for running fetches and hooks.
"""
self._session = session
self._run_with_hooks_fn = run_with_hooks_fn
@property
def session(self):
return self._session
def run_with_hooks(self, *args, **kwargs):
"""Same as `MonitoredSession.run`. Accepts the same arguments."""
return self._run_with_hooks_fn(*args, **kwargs)
def request_stop(self):
"""Exit the training loop by causing `should_stop()` to return `True`.
Causes `step_fn` to exit by raising an exception.
Raises:
StopIteration
"""
raise StopIteration('step_fn has requested the iterations to stop.')
def should_stop(self):
return self._sess is None or self._sess.should_stop()
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(SessionCreator):
"""Factory for the _RecoverableSession."""
def __init__(self, session_creator, hooks, stop_grace_period_secs):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
self._stop_grace_period_secs = stop_grace_period_secs
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
if ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
# Inform the hooks that a new session has been created.
for hook in self._hooks:
hook.after_create_session(self.tf_sess, self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord,
self._stop_grace_period_secs)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
if self._sess is None:
raise RuntimeError('Session is already closed.')
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
if not self._graph_was_finalized:
ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access
def _is_closed(self):
"""Return True if the monitored session is closed.
For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
def _tf_sess(self):
"""Return underlying tf.compat.v1.Session object.
Warning: accessing the returned object in user code is likely to cause races
or "flaky tests".
Returns:
A tf.compat.v1.Session object.
"""
return self._coordinated_creator.tf_sess
@tf_export(v1=['train.MonitoredSession'])
class MonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
  Initialization: At creation time the monitored session does the following
  things in the given order:
  * calls `hook.begin()` for each given hook
  * finalizes the graph via `scaffold.finalize()`
  * creates the session
  * initializes the model via initialization ops provided by `Scaffold`
  * restores variables if a checkpoint exists
  * launches queue runners
  * calls `hook.after_create_session()`
  Run: When `run()` is called, the monitored session does the following things:
  * calls `hook.before_run()`
  * calls TensorFlow `session.run()` with merged fetches and feed_dict
  * calls `hook.after_run()`
  * returns the result of `session.run()` requested by the user
  * if `AbortedError` or `UnavailableError` occurs, it recovers or
    reinitializes the session before executing the `run()` call again
  Exit: On `close()`, the monitored session does the following things in order:
  * calls `hook.end()`
  * closes the queue runners and the session
  * suppresses the `OutOfRange` error (which indicates that all inputs have
    been processed) if the monitored session is used as a context manager
How to set `tf.compat.v1.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
  * In a distributed setting for a non-chief worker, you can use the following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
  Note: This is not a `tf.compat.v1.Session`. For example, it cannot do the
  following:
  * it cannot be set as a default session.
  * it cannot be sent to `saver.save`.
  * it cannot be sent to `tf.train.start_queue_runners`.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
    hooks: An iterable of `SessionRunHook` objects.
Returns:
A MonitoredSession object.
"""
def __init__(self,
session_creator=None,
hooks=None,
stop_grace_period_secs=120):
super(MonitoredSession, self).__init__(
session_creator,
hooks,
should_recover=True,
stop_grace_period_secs=stop_grace_period_secs)
@tf_export(v1=['train.SingularMonitoredSession'])
class SingularMonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, restoring, and hooks.
  Please note that this utility is not recommended for distributed settings.
  For distributed settings, please use `tf.compat.v1.train.MonitoredSession`.
  The differences between `MonitoredSession` and `SingularMonitoredSession`
  are:
* `MonitoredSession` handles `AbortedError` and `UnavailableError` for
distributed settings, but `SingularMonitoredSession` does not.
* `MonitoredSession` can be created in `chief` or `worker` modes.
`SingularMonitoredSession` is always created as `chief`.
* You can access the raw `tf.compat.v1.Session` object used by
`SingularMonitoredSession`, whereas in MonitoredSession the raw session is
private. This can be used:
- To `run` without hooks.
- To save and restore.
* All other functionality is identical.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
  Initialization: At creation time the hooked session does the following things
  in the given order:
  * calls `hook.begin()` for each given hook
  * finalizes the graph via `scaffold.finalize()`
  * creates the session
  * initializes the model via initialization ops provided by `Scaffold`
  * restores variables if a checkpoint exists
  * launches queue runners
  Run: When `run()` is called, the hooked session does the following things:
  * calls `hook.before_run()`
  * calls TensorFlow `session.run()` with merged fetches and feed_dict
  * calls `hook.after_run()`
  * returns the result of `session.run()` requested by the user
  Exit: On `close()`, the hooked session does the following things in order:
  * calls `hook.end()`
  * closes the queue runners and the session
  * suppresses the `OutOfRange` error (which indicates that all inputs have
    been processed) if the `SingularMonitoredSession` is used as a context
    manager.
"""
def __init__(self,
hooks=None,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
stop_grace_period_secs=120,
checkpoint_filename_with_path=None):
"""Creates a SingularMonitoredSession.
Args:
      hooks: An iterable of `SessionRunHook` objects.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
checkpoint_filename_with_path: A string. Optional path to a checkpoint
file from which to restore variables.
"""
session_creator = ChiefSessionCreator(
scaffold=scaffold,
master=master,
config=config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
super(SingularMonitoredSession, self).__init__(
session_creator,
hooks,
should_recover=False,
stop_grace_period_secs=stop_grace_period_secs)
def raw_session(self):
"""Returns underlying `TensorFlow.Session` object."""
return self._tf_sess()
class _WrappedSession(object):
"""Wrapper around a `tf.compat.v1.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.compat.v1.Session` or `_WrappedSession` object. The wrapped
session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
except _PREEMPTION_ERRORS as e:
logging.warning(
'An error occurred when attempting to close the '
'session. This may be due to a preemption in a '
'connected worker or parameter server. Error: %s', e)
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
# `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`.
# It is `None` when called from `_CoordinatedSession`. In that case
# `self.run` is `_CoordinatedSession.run`.
run_with_hooks = run_with_hooks or self.run
return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks))
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session upon certain kinds of errors.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the
wrapped session is closed, and a new one is created by calling the factory
again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
      sess_creator: A `SessionCreator` to be wrapped by the recoverable
        session.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._create_session())
def _create_session(self):
while True:
try:
return self._sess_creator.create_session()
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while a session was being created. '
'This may be due to a preemption of a connected worker '
'or parameter server. A new session will be created. '
'This error may also occur due to a gRPC failure caused '
'by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
def _check_stop(self):
try:
if self._sess:
return self._sess._check_stop() # pylint: disable=protected-access
else:
return True
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised while considering whether the '
'session is complete. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = self._create_session()
# Since we have just recreated the session, the overall computation should
# not stop:
return False
except Exception: # pylint: disable=broad-except
# `should_stop` should return True instead of raising an exception.
return True
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._create_session()
return self._sess.run(
fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
while True:
try:
if not self._sess:
self._sess = self._create_session()
run_with_hooks = self._sess.run
return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks)
except _PREEMPTION_ERRORS as e:
logging.info(
'An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. This error may also occur due to a gRPC failure '
'caused by high memory or network bandwidth usage in the '
'parameter servers. If this error occurs repeatedly, try '
'increasing the number of parameter servers assigned to '
'the job. Error: %s', e)
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
  In addition, after each call to `run()` this session asks the coordinator if
  the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord, stop_grace_period_secs=120):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.compat.v1.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
self._stop_grace_period_secs = stop_grace_period_secs
def _check_stop(self):
# If the coordinator was asked to stop due to an exception, then it needs
# to be propagated to this stack.
self._coord.raise_requested_exception()
# At this point, no exceptions are recorded in the coordinator.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join(
stop_grace_period_secs=self._stop_grace_period_secs,
ignore_live_threads=True)
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
def run(self, *args, **kwargs):
try:
return self._sess.run(*args, **kwargs)
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
# A non-preemption error could have been caused by a preemption error
# in the coordinator. If this is the case, raise that exception instead,
# since it's the root cause. Otherwise, stick to the `original_exc_info`.
original_exc_info = sys.exc_info()
try:
self._coord.raise_requested_exception()
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
        six.reraise(*original_exc_info)
else:
        six.reraise(*original_exc_info)
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
  If any call to the hooks requests a stop via `run_context`, the session will
  be marked as needing to stop, and its `should_stop()` method will then
  return `True`.
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.compat.v1.Session` or a `_WrappedSession` object.
      hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
options = options or config_pb2.RunOptions()
feed_dict = self._call_hook_before_run(run_context, actual_fetches,
feed_dict, options)
# Do session run.
run_metadata = run_metadata or config_pb2.RunMetadata()
outputs = _WrappedSession.run(
self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(
results=outputs[hook] if hook in outputs else None,
options=options,
run_metadata=run_metadata))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,
options):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if request.options:
self._merge_run_options(options, request.options)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
def _merge_run_options(self, options, incoming_options):
"""Merge two instances of RunOptions into the first one.
    During the merge, the numerical fields, including trace_level,
    timeout_in_ms, and inter_op_thread_pool, are set to the larger of the two.
    Boolean fields are set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
"""
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms,
incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool,
incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(
options.output_partition_graphs,
incoming_options.output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
incoming_options.debug_options.debug_tensor_watch_opts)
options.debug_options.reset_disk_byte_usage = (
options.debug_options.reset_disk_byte_usage or
incoming_options.debug_options.reset_disk_byte_usage)
options.report_tensor_allocations_upon_oom = (
options.report_tensor_allocations_upon_oom or
incoming_options.report_tensor_allocations_upon_oom)
|
tensorflow-master
|
tensorflow/python/training/monitored_session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# Picked a long key value to minimize the chance of collision with user-defined
# collection keys.
GLOBAL_STEP_READ_KEY = 'global_step_read_op_cache'
# TODO(drpng): remove this after legacy uses are resolved.
write_graph = graph_io.write_graph
@tf_export(v1=['train.global_step'])
def global_step(sess, global_step_tensor):
"""Small helper to get the global step.
```python
# Create a variable to hold the global_step.
global_step_tensor = tf.Variable(10, trainable=False, name='global_step')
# Create a session.
sess = tf.compat.v1.Session()
# Initialize the variable
sess.run(global_step_tensor.initializer)
# Get the variable value.
  print('global_step: %s' % tf.compat.v1.train.global_step(
      sess, global_step_tensor))
  # Prints: global_step: 10
```
Args:
sess: A TensorFlow `Session` object.
global_step_tensor: `Tensor` or the `name` of the operation that contains
the global step.
Returns:
The global step value.
"""
if context.executing_eagerly():
return int(global_step_tensor.numpy())
return int(sess.run(global_step_tensor))
@tf_export(v1=['train.get_global_step'])
def get_global_step(graph=None):
"""Get the global step tensor.
The global step tensor must be an integer variable. We first try to find it
in the collection `GLOBAL_STEP`, or by name `global_step:0`.
Args:
graph: The graph to find the global step in. If missing, use default graph.
Returns:
The global step variable, or `None` if none was found.
Raises:
TypeError: If the global step tensor has a non-integer type, or if it is not
a `Variable`.
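  For example (a minimal sketch):
  ```python
  tf.compat.v1.train.create_global_step()
  step = tf.compat.v1.train.get_global_step()  # the variable created above
  ```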
"""
graph = graph or ops.get_default_graph()
global_step_tensor = None
global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)
if len(global_step_tensors) == 1:
global_step_tensor = global_step_tensors[0]
elif not global_step_tensors:
try:
global_step_tensor = graph.get_tensor_by_name('global_step:0')
except KeyError:
return None
else:
logging.error('Multiple tensors in global_step collection.')
return None
assert_global_step(global_step_tensor)
return global_step_tensor
@tf_export(v1=['train.create_global_step'])
def create_global_step(graph=None):
"""Create global step tensor in graph.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
graph = graph or ops.get_default_graph()
if get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
if context.executing_eagerly():
with ops.device('cpu:0'):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA,
collections=[
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP
])
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
@tf_export(v1=['train.get_or_create_global_step'])
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
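  For example (a minimal sketch):
  ```python
  global_step = tf.compat.v1.train.get_or_create_global_step()
  increment_op = tf.compat.v1.assign_add(global_step, 1)
  ```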
"""
graph = graph or ops.get_default_graph()
global_step_tensor = get_global_step(graph)
if global_step_tensor is None:
global_step_tensor = create_global_step(graph)
return global_step_tensor
@tf_export(v1=['train.assert_global_step'])
def assert_global_step(global_step_tensor):
"""Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.
Args:
global_step_tensor: `Tensor` to test.
"""
if not (isinstance(global_step_tensor, variables.Variable) or
isinstance(global_step_tensor, ops.Tensor) or
resource_variable_ops.is_resource_variable(global_step_tensor)):
raise TypeError('Existing "global_step" must be a Variable or Tensor: %s.' %
global_step_tensor)
if not global_step_tensor.dtype.base_dtype.is_integer:
raise TypeError('Existing "global_step" does not have integer type: %s' %
global_step_tensor.dtype)
if (global_step_tensor.get_shape().ndims != 0 and
global_step_tensor.get_shape().is_fully_defined()):
raise TypeError('Existing "global_step" is not scalar: %s' %
global_step_tensor.get_shape())
def _get_global_step_read(graph=None):
"""Gets global step read tensor in graph.
Args:
graph: The graph in which to create the global step read tensor. If missing,
use default graph.
Returns:
Global step read tensor.
Raises:
RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.
"""
graph = graph or ops.get_default_graph()
global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
if len(global_step_read_tensors) > 1:
raise RuntimeError('There are multiple items in collection {}. '
'There should be only one.'.format(GLOBAL_STEP_READ_KEY))
if len(global_step_read_tensors) == 1:
return global_step_read_tensors[0]
return None
def _get_or_create_global_step_read(graph=None):
"""Gets or creates global step read tensor in graph.
Args:
graph: The graph in which to create the global step read tensor. If missing,
use default graph.
Returns:
    Global step read tensor if there is a global_step_tensor, else None.
"""
graph = graph or ops.get_default_graph()
global_step_read_tensor = _get_global_step_read(graph)
if global_step_read_tensor is not None:
return global_step_read_tensor
global_step_tensor = get_global_step(graph)
if global_step_tensor is None:
return None
  # Add zero so that a copy of the variable is created as a Tensor.
with graph.as_default() as g, g.name_scope(None):
with g.name_scope(global_step_tensor.op.name + '/'):
      # Use initialized_value to ensure that global_step is initialized before
      # this op runs. This is needed because, for example, Estimator builds all
      # model_fns under a dependency on global_step_read_tensor.
global_step_value = global_step_tensor.initialized_value() if isinstance(
global_step_tensor, variables.Variable) else global_step_tensor
global_step_read_tensor = global_step_value + 0
ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)
return _get_global_step_read(graph)
def _increment_global_step(increment, graph=None):
graph = graph or ops.get_default_graph()
global_step_tensor = get_global_step(graph)
if global_step_tensor is None:
raise ValueError(
'Global step tensor should be created by '
'tf.train.get_or_create_global_step before calling increment.')
global_step_read_tensor = _get_or_create_global_step_read(graph)
with graph.as_default() as g, g.name_scope(None):
with g.name_scope(global_step_tensor.op.name + '/'):
with ops.control_dependencies([global_step_read_tensor]):
return state_ops.assign_add(global_step_tensor, increment)
|
tensorflow-master
|
tensorflow/python/training/training_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
  def doTestFtrlWithoutRegularization(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithoutRegularization(self):
    self.doTestFtrlWithoutRegularization(use_resource=False)
@test_util.run_deprecated_v1
def testResourceFtrlWithoutRegularization(self):
    self.doTestFtrlWithoutRegularization(use_resource=True)
@test_util.run_deprecated_v1
  def testFtrlWithoutRegularization2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testFtrlWithL1(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps of FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps of FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter which places a constant pressure on weights
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps of FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22578995, -0.44345796]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL1_L2_L2ShrinkageSparse(self):
"""Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
opt = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)
# Run 10 steps of FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)
@test_util.run_deprecated_v1
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([1.0, 2.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)
opt0 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
opt1 = ftrl.FtrlOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)
# Run 10 steps of FTRL
for _ in range(10):
update0.run()
update1.run()
v0_val, v1_val = self.evaluate([var0, var1])
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((v0_val**2 < v1_val**2).all())
accum0 = list(self.evaluate(opt0._slots)["accum"].values())[0]
accum1 = list(self.evaluate(opt1._slots)["accum"].values())[0]
# L2 shrinkage should not change how we update grad accumulator.
self.assertAllCloseAccordingToType(accum0, accum1)
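# Editorial sketch (plain arithmetic; not part of the original test file):
# under the l2_shrinkage formulation, only the gradient used for the
# linear/weight update is perturbed, g_shrink = g + 2 * l2_shrinkage * w,
# while the squared-gradient accumulator that drives the learning rate
# schedule is still fed the raw g**2 -- which is why accum0 == accum1 above.
def _l2ShrinkageGradientSketch(self, g, w, l2_shrinkage=0.1):
  g_shrink = g + 2.0 * l2_shrinkage * w  # feeds the linear/weight update
  accum_update = g * g                   # the lr schedule sees the raw grad
  return g_shrink, accum_update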
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
grads0 = ops.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]), constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
constant_op.constant([1]), constant_op.constant([2, 1]))
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = self.evaluate([var0, var1])
if is_sparse:
self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
else:
self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
# Run Ftrl for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
return v0_val, v1_val
# When variables are initialized with zero, FTRL-Proximal has two properties:
# 1. Without L1/L2 but with a fixed learning rate, FTRL-Proximal is
# identical to GradientDescent.
# 2. Without L1/L2 but with an adaptive learning rate, FTRL-Proximal is
# identical to Adagrad.
# Based on these two properties, we test whether our implementation of
# FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
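# Editorial sketch (NumPy only; not part of the original test file) of
# property 2 above: iterating the closed-form FTRL-Proximal update with
# learning_rate_power=-0.5 and no L1/L2 reproduces Adagrad exactly when the
# weights start at zero. Uses the module's existing `np` import.
def _ftrlAdagradEquivalenceSketch(self, steps=3):
  lr, g, init_accum = 3.0, np.array([0.1, 0.2]), 0.1
  z, n, w_ftrl = np.zeros(2), np.full(2, init_accum), np.zeros(2)
  w_ada, n_ada = np.zeros(2), np.full(2, init_accum)
  for _ in range(steps):
    n_new = n + g * g
    sigma = (np.sqrt(n_new) - np.sqrt(n)) / lr  # per-step lr correction
    z, n = z + g - sigma * w_ftrl, n_new
    w_ftrl = -lr * z / np.sqrt(n)               # FTRL closed-form weight
    n_ada += g * g
    w_ada -= lr * g / np.sqrt(n_ada)            # plain Adagrad step
  np.testing.assert_allclose(w_ftrl, w_ada, rtol=1e-6)
  return w_ftrl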
@test_util.run_deprecated_v1
def testEquivAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivSparseAdagradwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Adagrad learning rate
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivSparseGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype,
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0),
dtype,
is_sparse=True)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
@test_util.run_deprecated_v1
def testEquivGradientDescentwithoutRegularization(self):
for dtype in [dtypes.half, dtypes.float32]:
with self.cached_session():
val0, val1 = self.applyOptimizer(
ftrl.FtrlOptimizer(
3.0,
# Fixed learning rate
learning_rate_power=-0.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
dtype)
with self.cached_session():
val2, val3 = self.applyOptimizer(
gradient_descent.GradientDescentOptimizer(3.0), dtype)
self.assertAllCloseAccordingToType(val0, val2)
self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/ftrl_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Proximal Adagrad operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import proximal_adagrad
class ProximalAdagradOptimizerTest(test.TestCase):
def doTestProximalAdagradwithoutRegularization(self, use_resource=False):
with self.cached_session() as sess:
if use_resource:
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0])
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0])
else:
var0 = variables.Variable([0.0, 0.0])
var1 = variables.Variable([0.0, 0.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps of Proximal Adagrad.
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllClose(np.array([-0.28432083, -0.56694895]), v1_val)
opt_vars = opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var1._shared_name)
self.assertEqual(2, len(opt_vars))
@test_util.run_deprecated_v1
def testProximalAdagradwithoutRegularization(self):
self.doTestProximalAdagradwithoutRegularization(use_resource=False)
@test_util.run_deprecated_v1
def testResourceProximalAdagradwithoutRegularization(self):
self.doTestProximalAdagradwithoutRegularization(use_resource=True)
@test_util.run_deprecated_v1
def testProximalAdagradwithoutRegularization2(self):
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 3 steps of Proximal Adagrad.
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-1.60261, -2.296985]), v0_val)
self.assertAllClose(np.array([3.715679, 2.433051]), v1_val)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = proximal_adagrad.ProximalAdagradOptimizer(1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testProximalAdagradWithL1(self):
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps of Proximal Adagrad
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-6.663634, -9.190331]), v0_val)
self.assertAllClose(np.array([2.959304, 1.029232]), v1_val)
@test_util.run_deprecated_v1
def testProximalAdagradWithL1_L2(self):
with self.cached_session() as sess:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([4.0, 3.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
opt = proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([4.0, 3.0], v1_val)
# Run 10 steps of Proximal Adagrad.
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose(np.array([-0.0495, -0.0995]), v0_val)
self.assertAllClose(np.array([-0.0045, -0.0095]), v1_val)
def applyOptimizer(self, opt, steps=5, is_sparse=False):
if is_sparse:
var0 = variables.Variable([[1.0], [2.0]])
var1 = variables.Variable([[3.0], [4.0]])
grads0 = ops.IndexedSlices(
constant_op.constant(
[0.1], shape=[1, 1]),
constant_op.constant([0]),
constant_op.constant([2, 1]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[0.02], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
else:
var0 = variables.Variable([1.0, 2.0])
var1 = variables.Variable([3.0, 4.0])
grads0 = constant_op.constant([0.1, 0.2])
grads1 = constant_op.constant([0.01, 0.02])
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
sess = ops.get_default_session()
v0_val, v1_val = self.evaluate([var0, var1])
if is_sparse:
self.assertAllClose([[1.0], [2.0]], v0_val)
self.assertAllClose([[3.0], [4.0]], v1_val)
else:
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
return v0_val, v1_val
@test_util.run_deprecated_v1
def testEquivAdagradwithoutRegularization(self):
with self.cached_session():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0))
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1))
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
@test_util.run_deprecated_v1
def testEquivSparseAdagradwithoutRegularization(self):
with self.cached_session():
val0, val1 = self.applyOptimizer(
proximal_adagrad.ProximalAdagradOptimizer(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0),
is_sparse=True)
with self.cached_session():
val2, val3 = self.applyOptimizer(
adagrad.AdagradOptimizer(
3.0, initial_accumulator_value=0.1),
is_sparse=True)
self.assertAllClose(val0, val2)
self.assertAllClose(val1, val3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/proximal_adagrad_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.exponential_decay"])
def exponential_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate *
decay_rate ^ (global_step / decay_steps)
```
If the argument `staircase` is `True`, then `global_step / decay_steps` is an
integer division and the decayed learning rate follows a staircase function.
Example: decay every 100000 steps with a base of 0.96:
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate,
global_step,
100000, 0.96, staircase=True)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The decay rate.
staircase: Boolean. If `True`, decay the learning rate at discrete intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
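# Editorial sketch (pure Python; not part of the original module): the
# docstring formula evaluated numerically, comparing the continuous and
# staircase variants with the example's numbers (base 0.96 per 100000 steps).
def _exponential_decay_sketch():
  initial_lr, decay_steps, decay_rate = 0.1, 100000, 0.96
  for step in (0, 50000, 100000, 250000):
    continuous = initial_lr * decay_rate**(step / decay_steps)
    staircase = initial_lr * decay_rate**(step // decay_steps)
    print(step, round(continuous, 6), round(staircase, 6))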
@tf_export(v1=["train.piecewise_constant_decay", "train.piecewise_constant"])
def piecewise_constant(x, boundaries, values, name=None):
"""Piecewise constant from boundaries and interval values.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
global_step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate = tf.compat.v1.train.piecewise_constant(global_step, boundaries,
values)
# Later, whenever we perform an optimization step, we increment global_step.
```
Args:
x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
values: A list of `Tensor`s or `float`s or `int`s that specifies the values
for the intervals defined by `boundaries`. It should have one more element
than `boundaries`, and all elements should have the same type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Returns:
A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
`values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ...,
and values[-1] when `x > boundaries[-1]`.
Raises:
ValueError: if types of `x` and `boundaries` do not match, or types of all
`values` do not match or
the number of elements in the lists does not match.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
boundaries = ops.convert_n_to_tensor(boundaries)
values = ops.convert_n_to_tensor(values)
x_recomp = ops.convert_to_tensor(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We can promote int32 boundaries to int64 without loss of precision.
# This covers the most common case where the user passes in boundaries
# as an array of Python integers.
if (b.dtype.base_dtype == dtypes.int32 and
x_recomp.dtype.base_dtype == dtypes.int64):
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
else:
raise ValueError(
"Boundaries (%s) must have the same dtype as x (%s)." %
(b.dtype.base_dtype, x_recomp.dtype.base_dtype))
for v in values[1:]:
if v.dtype.base_dtype != values[0].dtype.base_dtype:
raise ValueError(
"Values must have elements all with the same dtype (%s vs %s)." %
(values[0].dtype.base_dtype, v.dtype.base_dtype))
decayed_lr = learning_rate_schedule.PiecewiseConstantDecay(
boundaries, values, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(x)
else:
decayed_lr = functools.partial(decayed_lr, x)
return decayed_lr
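# Editorial sketch (pure Python; not part of the original module) of the
# lookup the docstring describes: values[i] applies up to and including
# boundaries[i], and values[-1] applies past the last boundary.
def _piecewise_constant_sketch(step, boundaries=(100000, 110000),
                               values=(1.0, 0.5, 0.1)):
  for boundary, value in zip(boundaries, values):
    if step <= boundary:
      return value
  return values[-1]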
@tf_export(v1=["train.polynomial_decay"])
def polynomial_decay(learning_rate,
global_step,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This function applies a polynomial decay function to a provided initial
`learning_rate` to reach an `end_learning_rate` in the given `decay_steps`.
It requires a `global_step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
If `cycle` is True then a multiple of `decay_steps` is used, the first one
that is bigger than `global_step`.
```python
decay_steps = decay_steps * ceil(global_step / decay_steps)
decayed_learning_rate = (learning_rate - end_learning_rate) *
(1 - global_step / decay_steps) ^ (power) +
end_learning_rate
```
Example: decay from 0.1 to 0.01 in 10000 steps using sqrt (i.e. power=0.5):
```python
...
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate = tf.compat.v1.train.polynomial_decay(starter_learning_rate,
global_step,
decay_steps, end_learning_rate,
power=0.5)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation. Must not be negative.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must
be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python
number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a Python number. The
power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.PolynomialDecay(
learning_rate,
decay_steps,
end_learning_rate=end_learning_rate,
power=power,
cycle=cycle,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
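# Editorial sketch (pure Python; not part of the original module) of the two
# docstring formulas, using the example's numbers (0.1 -> 0.01 over 10000
# steps, power=0.5). The max(1.0, ...) guard at step 0 is an assumption to
# keep the cycled denominator nonzero.
def _polynomial_decay_sketch(global_step, cycle=False):
  import math
  lr, end_lr, decay_steps, power = 0.1, 0.01, 10000, 0.5
  if cycle:
    decay_steps *= max(1.0, math.ceil(global_step / decay_steps))
  else:
    global_step = min(global_step, decay_steps)
  return (lr - end_lr) * (1.0 - global_step / decay_steps)**power + end_lr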
@tf_export(v1=["train.natural_exp_decay"])
def natural_exp_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies natural exponential decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an exponential decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * global_step /
decay_steps)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate * exp(-decay_rate * floor(global_step /
decay_steps))
```
Example: decay exponentially with a rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 5
k = 0.5
learning_rate = tf.compat.v1.train.natural_exp_decay(learning_rate,
global_step,
decay_steps, k)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'ExponentialTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
natural_exp_rate = math_ops.exp(math_ops.negative(decay_rate))
decayed_lr = learning_rate_schedule.ExponentialDecay(
learning_rate,
decay_steps,
natural_exp_rate,
staircase=staircase,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
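# Editorial sketch (pure Python; not part of the original module): the
# natural exponential formula, with the same exp(-decay_rate) rewrite that
# the wrapper above hands to ExponentialDecay.
def _natural_exp_decay_sketch(global_step, staircase=False):
  import math
  lr, decay_rate, decay_steps = 0.1, 0.5, 5
  p = global_step / decay_steps
  if staircase:
    p = math.floor(p)
  return lr * math.exp(-decay_rate * p)  # == lr * (exp(-decay_rate))**p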
@tf_export(v1=["train.inverse_time_decay"])
def inverse_time_decay(learning_rate,
global_step,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies an inverse decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * global_step /
decay_steps)
```
or, if `staircase` is `True`, as:
```python
decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step /
decay_steps))
```
Example: decay 1/t with a rate of 0.5:
```python
...
global_step = tf.Variable(0, trainable=False)
learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate = tf.compat.v1.train.inverse_time_decay(learning_rate,
global_step,
decay_steps, decay_rate)
# Passing global_step to minimize() will increment it at each step.
learning_step = (
tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
.minimize(...my loss..., global_step=global_step)
)
```
Args:
learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number.
The initial learning rate.
global_step: A Python number. Global step to use for the decay computation.
Must not be negative.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps, decay_rate, staircase=staircase, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
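# Editorial sketch (pure Python; not part of the original module) of the
# inverse time formula, with the example's 1/t settings.
def _inverse_time_decay_sketch(global_step, staircase=False):
  import math
  lr, decay_rate, decay_steps = 0.1, 0.5, 1.0
  p = global_step / decay_steps
  if staircase:
    p = math.floor(p)
  return lr / (1.0 + decay_rate * p)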
@tf_export(v1=["train.cosine_decay"])
def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):
"""Applies cosine decay to the learning rate.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * global_step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed = cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecay(
learning_rate, decay_steps, alpha=alpha, name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
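# Editorial sketch (pure Python; not part of the original module) of the
# cosine formula: the rate glides from learning_rate down to
# alpha * learning_rate over decay_steps and then stays there.
def _cosine_decay_sketch(global_step, learning_rate=0.1, decay_steps=1000,
                         alpha=0.0):
  import math
  step = min(global_step, decay_steps)
  cosine = 0.5 * (1.0 + math.cos(math.pi * step / decay_steps))
  return learning_rate * ((1.0 - alpha) * cosine + alpha)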
@tf_export(v1=["train.cosine_decay_restarts"])
def cosine_decay_restarts(learning_rate,
global_step,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
See [Loshchilov & Hutter, ICLR2016], SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a cosine decay function with
restarts to a provided initial learning rate. It requires a `global_step`
value to compute the decayed learning rate. You can just pass a TensorFlow
variable that you increment at each training step.
The function returns the decayed learning rate while taking into account
possible warm restarts. The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more steps
and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed = cosine_decay_restarts(learning_rate, global_step,
first_decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to
derive the number of iterations in the i-th period.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum
learning rate value as a fraction of the learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.CosineDecayRestarts(
learning_rate,
first_decay_steps,
t_mul=t_mul,
m_mul=m_mul,
alpha=alpha,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
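# Editorial sketch (pure Python; an approximation of the schedule class the
# wrapper above delegates to, not the exact implementation): walk past the
# completed restart periods, stretching each period by t_mul and scaling the
# peak rate by m_mul, then apply an ordinary cosine decay within the period.
def _cosine_decay_restarts_sketch(global_step, learning_rate=0.1,
                                  first_decay_steps=1000, t_mul=2.0,
                                  m_mul=1.0, alpha=0.0):
  import math
  steps, period = float(global_step), float(first_decay_steps)
  peak = learning_rate
  while steps >= period:  # consume completed periods
    steps -= period
    period *= t_mul
    peak *= m_mul
  cosine = 0.5 * (1.0 + math.cos(math.pi * steps / period))
  return peak * ((1.0 - alpha) * cosine + alpha)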
@tf_export(v1=["train.linear_cosine_decay"])
def linear_cosine_decay(learning_rate,
global_step,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a linear cosine decay function
to a provided initial learning rate. It requires a `global_step` value to
compute the decayed learning rate. You can just pass a TensorFlow variable
that you increment at each training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed = linear_cosine_decay(learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.LinearCosineDecay(
learning_rate,
decay_steps,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
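# Editorial sketch (pure Python; not part of the original module) of the
# linear-times-cosine formula above.
def _linear_cosine_decay_sketch(global_step, learning_rate=0.1,
                                decay_steps=1000, num_periods=0.5,
                                alpha=0.0, beta=0.001):
  import math
  step = min(global_step, decay_steps)
  linear = (decay_steps - step) / decay_steps
  cosine = 0.5 * (
      1.0 + math.cos(2.0 * math.pi * num_periods * step / decay_steps))
  return learning_rate * ((alpha + linear) * cosine + beta)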
@tf_export(v1=["train.noisy_linear_cosine_decay"])
def noisy_linear_cosine_decay(learning_rate,
global_step,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This function applies a noisy linear
cosine decay function to a provided initial learning rate.
It requires a `global_step` value to compute the decayed learning rate.
You can just pass a TensorFlow variable that you increment at each
training step.
The function returns the decayed learning rate. It is computed as:
```python
global_step = min(global_step, decay_steps)
linear_decay = (decay_steps - global_step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * global_step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
decayed_learning_rate = learning_rate * decayed
```
where eps_t is 0-centered gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed = noisy_linear_cosine_decay(
learning_rate, global_step, decay_steps)
```
Args:
learning_rate: A scalar `float32` or `float64` Tensor or a Python number.
The initial learning rate.
global_step: A scalar `int32` or `int64` `Tensor` or a Python number. Global
step to use for the decay computation.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number
of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay. See
computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
Returns:
A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
ValueError: if `global_step` is not supplied.
@compatibility(eager)
When eager execution is enabled, this function returns a function which in
turn returns the decayed learning rate Tensor. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
decayed_lr = learning_rate_schedule.NoisyLinearCosineDecay(
learning_rate,
decay_steps,
initial_variance=initial_variance,
variance_decay=variance_decay,
num_periods=num_periods,
alpha=alpha,
beta=beta,
name=name)
if not context.executing_eagerly():
decayed_lr = decayed_lr(global_step)
else:
decayed_lr = functools.partial(decayed_lr, global_step)
return decayed_lr
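# Editorial sketch (pure Python; not part of the original module) of the
# noisy variant: identical to linear cosine decay except for the eps_t
# perturbation, drawn with standard deviation sqrt(variance), where the
# variance shrinks as training progresses.
def _noisy_linear_cosine_decay_sketch(global_step, learning_rate=0.1,
                                      decay_steps=1000, initial_variance=1.0,
                                      variance_decay=0.55, num_periods=0.5,
                                      alpha=0.0, beta=0.001):
  import math
  import random
  step = min(global_step, decay_steps)
  variance = initial_variance / (1.0 + step)**variance_decay
  eps_t = random.gauss(0.0, math.sqrt(variance))
  linear = (decay_steps - step) / decay_steps
  cosine = 0.5 * (
      1.0 + math.cos(2.0 * math.pi * num_periods * step / decay_steps))
  return learning_rate * ((alpha + linear + eps_t) * cosine + beta)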
|
tensorflow-master
|
tensorflow/python/training/learning_rate_decay.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SessionManager."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager
class SessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.VariableV1([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Rename the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegex(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
# This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def _test_recovered_variable(self,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
# Create a new Graph and SessionManager and recover from a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
with session_lib.Session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEqual(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
self._test_recovered_variable(checkpoint_dir=checkpoint_dir)
self._test_recovered_variable(
checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
checkpoint_dir))
# Cannot set both checkpoint_dir and checkpoint_filename_with_path.
with self.assertRaises(ValueError):
self._test_recovered_variable(
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_management.latest_checkpoint(
checkpoint_dir))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
def testInitWithNoneLocalInitOpError(self):
# Creating a SessionManager with a None local_init_op but
# non-None ready_for_local_init_op raises ValueError
with self.assertRaisesRegex(
ValueError, "If you pass a ready_for_local_init_op "
"you must also pass a local_init_op "):
session_manager.SessionManager(
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=None)
@test_util.run_v1_only("b/120545219")
def testRecoverSessionWithReadyForLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(),
"recover_session_ready_for_local_init")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEqual(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(1, sess.run(v))
self.assertEqual(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
# We use ready_for_local_init_op=report_uninitialized_variables(),
# which causes recover_session to not run local_init_op, and to return
# initialized=False
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_to_ready_local")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
self.assertEqual(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionNoChkptStillRunsLocalInitOp(self):
# This test checks for backwards compatibility.
# In particular, we continue to ensure that recover_session will execute
# local_init_op exactly once, regardless of whether the session was
# successfully recovered.
with ops.Graph().as_default():
w = variables.VariableV1(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
# Try to recover session from None
sess, initialized = sm2.recover_session(
"", saver=None, checkpoint_dir=None)
        # Succeeds because recover_session still runs local_init_op
self.assertFalse(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
        self.assertEqual(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testRecoverSessionFailsStillRunsLocalInitOp(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(
self.get_temp_dir(),
"recover_session_ready_for_local_init_fails_stil_run")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
w = variables.VariableV1(
1,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"",
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=False)
self.assertFalse(initialized)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
        self.assertEqual(1, sess.run(w))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionLocalInit(self):
server = server_lib.Server.create_local_server()
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
# Initialize v but not w
s = session_lib.Session(server.target, graph=graph)
s.run(v.initializer)
sess = sm.wait_for_session(server.target, max_wait_secs=3)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
      self.assertEqual(1, sess.run(v))
      self.assertEqual(1, sess.run(w))
def testWaitForSessionWithReadyForLocalInitOpFailsToReadyLocal(self):
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(),
local_init_op=w.initializer)
with self.assertRaises(errors_impl.DeadlineExceededError):
        # Times out because w fails to be initialized, due to the overly
        # restrictive ready_for_local_init_op.
sm.wait_for_session("", max_wait_secs=3)
@test_util.run_v1_only("b/120545219")
def testWaitForSessionInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default() as graph:
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
sm = session_manager.SessionManager(
graph=graph,
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(errors_impl.DeadlineExceededError,
"Session was not ready after waiting.*"):
sm.wait_for_session("", max_wait_secs=3)
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithReadyForLocalInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=[w.initializer, x.initializer])
sess = sm2.prepare_session("", init_op=v.initializer)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
        self.assertEqual(1, sess.run(v))
        self.assertEqual(1, sess.run(w))
        self.assertEqual(3, sess.run(x))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithPartialInitOp(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
x = variables.VariableV1(
3 * v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x")
# TODO(b/70206927): Use ResourceVariables once they are handled properly.
v_res = variables.VariableV1(1, name="v_res")
w_res = variables.VariableV1(
v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w_res")
x_res = variables.VariableV1(
3 * v_res,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="x_res")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
self.assertEqual(False, variables.is_variable_initialized(x).eval())
self.assertEqual(False, variables.is_variable_initialized(v_res).eval())
self.assertEqual(False, variables.is_variable_initialized(w_res).eval())
self.assertEqual(False, variables.is_variable_initialized(x_res).eval())
sm2 = session_manager.SessionManager(local_init_op=[
w.initializer, x.initializer, w_res.initializer, x_res.initializer
])
sess = sm2.prepare_session("", init_op=None)
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x:0")).eval(session=sess))
        self.assertEqual(1, sess.run(w))
        self.assertEqual(3, sess.run(x))
self.assertEqual(
False,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("w_res:0")).eval(session=sess))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("x_res:0")).eval(session=sess))
        self.assertEqual(1, sess.run(w_res))
        self.assertEqual(3, sess.run(x_res))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithCyclicInitializer(self):
# Regression test. Previously Variable._build_initializer_expr would enter
# into an infinite recursion when the variable's initial_value involved
# cyclic dependencies.
with ops.Graph().as_default():
i = control_flow_ops.while_loop(lambda i: i < 1, lambda i: i + 1, [0])
v = variables.VariableV1(array_ops.identity(i), name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
sess = sm.prepare_session("", init_op=v.initializer)
self.assertEqual(1, sess.run(v))
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
def testPrepareSessionDidNotInitLocalVariable(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(
RuntimeError, "Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=v.initializer)
def testPrepareSessionDidNotInitLocalVariableList(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables())
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready"):
sm2.prepare_session("", init_op=[v.initializer])
def testPrepareSessionWithReadyNotReadyForLocal(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=variables.report_uninitialized_variables(
variables.global_variables()),
local_init_op=w.initializer)
with self.assertRaisesRegexp(
RuntimeError,
"Init operations did not make model ready for local_init"):
sm2.prepare_session("", init_op=None)
@test_util.run_v1_only("b/120545219")
def testPrepareSessionWithInsufficientReadyForLocalInitCheck(self):
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="w")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
self.assertEqual(False, variables.is_variable_initialized(w).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.report_uninitialized_variables(),
ready_for_local_init_op=None,
local_init_op=w.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Init operations did not make model ready.*"):
sm2.prepare_session("", init_op=None)
class ObsoleteSessionManagerTest(test.TestCase):
def testPrepareSessionSucceeds(self):
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_op=variables.global_variables_initializer())
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFeedDict(self):
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
def testPrepareSessionSucceedsWithInitFn(self):
with ops.Graph().as_default():
v = variables.VariableV1([125], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
sess = sm.prepare_session(
"", init_fn=lambda sess: sess.run(v.initializer))
self.assertAllClose([125], sess.run(v))
@test_util.run_v1_only("b/120545219")
def testPrepareSessionFails(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session")
checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2")
try:
gfile.DeleteRecursively(checkpoint_dir)
gfile.DeleteRecursively(checkpoint_dir2)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess = sm.prepare_session(
"",
init_op=variables.global_variables_initializer(),
saver=saver,
checkpoint_dir=checkpoint_dir)
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
checkpoint_filename = os.path.join(checkpoint_dir,
"prepare_session_checkpoint")
saver.save(sess, checkpoint_filename)
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
# Renames the checkpoint directory.
os.rename(checkpoint_dir, checkpoint_dir2)
gfile.MakeDirs(checkpoint_dir)
v = variables.VariableV1([6.0, 7.0, 8.0], name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
      sm = session_manager.SessionManager(
          ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
# This should fail as there's no checkpoint within 2 seconds.
with self.assertRaisesRegexp(
RuntimeError, "no init_op or init_fn or local_init_op was given"):
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
# Rename the checkpoint directory back.
gfile.DeleteRecursively(checkpoint_dir)
os.rename(checkpoint_dir2, checkpoint_dir)
      # This should succeed as there's a checkpoint.
sess = sm.prepare_session(
"",
init_op=None,
saver=saver,
checkpoint_dir=checkpoint_dir,
wait_for_checkpoint=True,
max_wait_secs=2)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
@test_util.run_v1_only("b/120545219")
def testRecoverSession(self):
# Create a checkpoint.
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session")
try:
gfile.DeleteRecursively(checkpoint_dir)
except errors.OpError:
pass # Ignore
gfile.MakeDirs(checkpoint_dir)
with ops.Graph().as_default():
v = variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertFalse(initialized)
sess.run(v.initializer)
      self.assertEqual(1, sess.run(v))
saver.save(sess, os.path.join(checkpoint_dir,
"recover_session_checkpoint"))
# Create a new Graph and SessionManager and recover.
with ops.Graph().as_default():
v = variables.VariableV1(2, name="v")
with self.cached_session():
self.assertEqual(False, variables.is_variable_initialized(v).eval())
sm2 = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized())
saver = saver_lib.Saver({"v": v})
sess, initialized = sm2.recover_session(
"", saver=saver, checkpoint_dir=checkpoint_dir)
self.assertTrue(initialized)
self.assertEqual(
True,
variables.is_variable_initialized(
sess.graph.get_tensor_by_name("v:0")).eval(session=sess))
        self.assertEqual(1, sess.run(v))
@test_util.run_v1_only("b/120545219")
def testWaitForSessionReturnsNoneAfterTimeout(self):
with ops.Graph().as_default():
variables.VariableV1(1, name="v")
sm = session_manager.SessionManager(
ready_op=variables.assert_variables_initialized(),
recovery_wait_secs=1)
# Set max_wait_secs to allow us to try a few times.
with self.assertRaises(errors.DeadlineExceededError):
sm.wait_for_session(master="", max_wait_secs=3)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/session_manager_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables.
Symbols in this file are deprecated. See replacements in
tensorflow/python/training/trackable and tensorflow/python/training/saving.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import time
import uuid
import numpy as np
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training_util
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# TODO(allenl): Remove these aliases once all users are migrated off.
get_checkpoint_state = checkpoint_management.get_checkpoint_state
update_checkpoint_state = checkpoint_management.update_checkpoint_state
generate_checkpoint_state_proto = (
checkpoint_management.generate_checkpoint_state_proto)
latest_checkpoint = checkpoint_management.latest_checkpoint
checkpoint_exists = checkpoint_management.checkpoint_exists
get_checkpoint_mtimes = checkpoint_management.get_checkpoint_mtimes
remove_checkpoint = checkpoint_management.remove_checkpoint
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
SaveSpec = saveable_object.SaveSpec
SaveableObject = saveable_object.SaveableObject
# Aliases for code which was moved but still has lots of users.
VariableSaveable = saveable_object_util.ReferenceVariableSaveable
ResourceVariableSaveable = saveable_object_util.ResourceVariableSaveable
def __init__(self, write_version=saver_pb2.SaverDef.V2):
self._write_version = write_version
def save_op(self, filename_tensor, saveables):
"""Create an Op to save 'saveables'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
      An Operation that saves the variables.
Raises:
RuntimeError: (implementation detail) if "self._write_version" is an
unexpected value.
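    Example (a minimal sketch; `DebugSaverBuilder` is hypothetical and not
    part of this module):
    ```python
    class DebugSaverBuilder(BaseSaverBuilder):
      def save_op(self, filename_tensor, saveables):
        # Log each saveable before delegating to the stock save op.
        for saveable in saveables:
          print("saving: %s" % saveable.name)
        return super(DebugSaverBuilder, self).save_op(
            filename_tensor, saveables)
    ```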
"""
# pylint: disable=protected-access
tensor_names = []
tensors = []
tensor_slices = []
for saveable in saveables:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
if self._write_version == saver_pb2.SaverDef.V1:
return io_ops._save(
filename=filename_tensor,
tensor_names=tensor_names,
tensors=tensors,
tensor_slices=tensor_slices)
elif self._write_version == saver_pb2.SaverDef.V2:
# "filename_tensor" is interpreted *NOT AS A FILENAME*, but as a prefix
# of a V2 checkpoint: e.g. "/fs/train/ckpt-<step>/tmp/worker<i>-<step>".
return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices,
tensors)
else:
raise RuntimeError("Unexpected write_version: " + self._write_version)
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
"""Restore all tensors contained in saveables.
By default, this issues separate calls to `restore_op` for each saveable.
Subclasses may override to load multiple saveables in a single call.
Args:
filename_tensor: String Tensor.
saveables: List of BaseSaverBuilder.SaveableObject objects.
preferred_shard: Int. Shard to open first when loading a sharded file.
restore_sequentially: Unused. Bool. If true, each restore is sequential.
Returns:
      A list of Tensors resulting from reading 'saveables' from
      'filename'.
"""
del restore_sequentially
all_tensors = []
for saveable in saveables:
if saveable.device:
device = saveable_object_util.set_cpu0(saveable.device)
else:
device = None
with ops.device(device):
all_tensors.extend(
self.restore_op(filename_tensor, saveable, preferred_shard))
return all_tensors
# pylint: disable=unused-argument
def restore_op(self, filename_tensor, saveable, preferred_shard):
"""Create ops to restore 'saveable'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveable: A BaseSaverBuilder.SaveableObject object.
preferred_shard: Int. Shard to open first when loading a sharded file.
Returns:
A list of Tensors resulting from reading 'saveable' from
'filename'.
"""
# pylint: disable=protected-access
tensors = []
for spec in saveable.specs:
tensors.append(
io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec],
[spec.dtype])[0])
return tensors
# pylint: enable=unused-argument
def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, saveables):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
saveables: A list of SaveableObject objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, saveables)
return control_flow_ops.with_dependencies([save], filename_tensor)
def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):
"""Add ops to save the params per shard, for the V2 format.
Note that the sharded save procedure for the V2 format is different from
V1: there is a special "merge" step that merges the small metadata produced
from each device.
Args:
checkpoint_prefix: scalar String Tensor. Interpreted *NOT AS A FILENAME*,
but as a prefix of a V2 checkpoint;
      per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables, which, when evaluated, returns the prefix
"<user-fed prefix>" only and does not include the sharded spec suffix.
"""
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><_SHARDED_SUFFIX>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
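    #
    # A concrete sketch with made-up values: for save_path "/train/myckpt"
    # and two shards, checkpoint_prefix is fed "/train/myckpt_temp_<hex>/part",
    # each shard writes "/train/myckpt_temp_<hex>/part-00000-of-00002" (plus
    # the .index/.data files), and the merge step rewrites everything under
    # the final prefix "/train/myckpt" before deleting the temporary
    # directory.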
_SHARDED_SUFFIX = "_temp_%s/part" % uuid.uuid4().hex
tmp_checkpoint_prefix = string_ops.string_join(
[checkpoint_prefix, _SHARDED_SUFFIX])
num_shards = len(per_device)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saveables) in enumerate(per_device):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(sharded_filename)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
with ops.control_dependencies([x.op for x in sharded_saves]):
# Co-locates the merge step with the last device.
with ops.device(saveable_object_util.set_cpu0(last_device)):
# V2 format write path consists of a metadata merge step. Once merged,
# attempts to delete the temporary directory, "<user-fed prefix>_temp".
merge_step = gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)
with ops.control_dependencies([merge_step]):
# Returns the prefix "<user-fed prefix>" only. DOES NOT include the
# sharded spec suffix.
return array_ops.identity(checkpoint_prefix)
def _AddShardedSaveOps(self, filename_tensor, per_device):
"""Add ops to save the params per shard.
Args:
filename_tensor: a scalar String Tensor.
per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables.
"""
if self._write_version == saver_pb2.SaverDef.V2:
return self._AddShardedSaveOpsForV2(filename_tensor, per_device)
num_shards = len(per_device)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_filename = self.sharded_filename(filename_tensor, shard,
num_shards_tensor)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
# Return the sharded name for the save path.
with ops.control_dependencies([x.op for x in sharded_saves]):
return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=-1,
name="restore_all"):
"""Add operations to restore saveables.
Args:
filename_tensor: Tensor for the path of the file to load.
saveables: A list of SaveableObject objects.
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
preferred_shard: Shard to open first when loading a sharded file.
name: Name for the returned op.
Returns:
An Operation that restores the variables.
"""
all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
restore_sequentially)
assign_ops = []
idx = 0
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
for saveable in saveables:
shapes = None
if reshape:
# Compute the shapes, let the restore op decide if and how to do
# the reshape.
shapes = []
for spec in saveable.specs:
v = spec.tensor
shape = v.get_shape()
if not shape.is_fully_defined():
shape = array_ops.shape(v)
shapes.append(shape)
saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]
idx += len(saveable.specs)
assign_ops.append(saveable.restore(saveable_tensors, shapes))
# Create a Noop that has control dependencies from all the updates.
return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as returned by
_GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of the
corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(
self._AddRestoreOps(
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
def _GroupByDevices(self, saveables):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
It can happen if the devices are unspecified.
Args:
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
      A list of (device_name, BaseSaverBuilder.SaveableObject) tuples, sorted
      by ascending device_name.
Raises:
ValueError: If the tensors of a saveable are on different devices.
"""
    per_device = collections.defaultdict(list)
for saveable in saveables:
canonical_device = set(
pydev.canonical_name(spec.tensor.device) for spec in saveable.specs)
if len(canonical_device) != 1:
raise ValueError("All tensors of a saveable object must be "
"on the same device: %s" % saveable.name)
per_device[canonical_device.pop()].append(saveable)
return sorted(per_device.items(), key=lambda t: t[0])
def build(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model"):
"""Builds save/restore graph nodes or runs save/restore in eager mode.
Args:
names_to_saveables: A dictionary mapping name to a Variable or
SaveableObject. Each name will be associated with the corresponding
variable in the checkpoint.
      reshape: If True, allow restoring parameters from a checkpoint where
        the parameters have a different shape. This is only needed when you
        try to restore from a Dist-Belief checkpoint, and only sometimes.
sharded: If True, shard the checkpoints, one per device that has Variable
nodes.
max_to_keep: Maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted from the filesystem but only the last one is kept in the
        `checkpoint` file. Presently the number is only roughly enforced; for
        example, in case of restarts, more than max_to_keep checkpoints may
        be kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
filename: If known at graph construction time, filename used for variable
loading/saving. If None, then the default name "model" will be used.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_saveables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_saveables' is not
unique.
"""
return self._build_internal(
names_to_saveables=names_to_saveables,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially,
filename=filename)
def _build_internal(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model",
build_save=True,
build_restore=True):
"""build() with option to only perform save and restore."""
if not context.executing_eagerly() and (not build_save or
not build_restore):
raise ValueError("save and restore operations need to be built together "
" when eager execution is not enabled.")
saveables = saveable_object_util.validate_and_slice_inputs(
names_to_saveables)
if max_to_keep is None:
max_to_keep = 0
with ops.name_scope(name, "save",
[saveable.op for saveable in saveables]) as name:
# Add a placeholder string tensor for the filename.
filename_tensor = array_ops.placeholder_with_default(
filename or "model", shape=(), name="filename")
# Keep the name "Const" for backwards compatibility.
filename_tensor = array_ops.placeholder_with_default(
filename_tensor, shape=(), name="Const")
# Add the save ops.
if sharded:
per_device = self._GroupByDevices(saveables)
if build_save:
save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
if build_restore:
restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,
restore_sequentially, reshape)
else:
if build_save:
save_tensor = self._AddSaveOps(filename_tensor, saveables)
if build_restore:
restore_op = self._AddRestoreOps(filename_tensor, saveables,
restore_sequentially, reshape)
# In the following use case, it's possible to have restore_ops be called
# something else:
# - Build inference graph and export a meta_graph.
# - Import the inference meta_graph
# - Extend the inference graph to a train graph.
# - Export a new meta_graph.
# Now the second restore_op will be called "restore_all_1".
# As such, comment out the assert for now until we know whether supporting
    # such a usage model makes sense.
#
# assert restore_op.name.endswith("restore_all"), restore_op.name
if context.executing_eagerly():
# Store the tensor values to the tensor_names.
save_tensor_name = save_tensor.numpy() if build_save else ""
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.numpy(),
save_tensor_name=save_tensor_name,
restore_op_name="",
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
else:
graph = ops.get_default_graph()
# Do some sanity checking on collections containing
# PartitionedVariables. If a saved collection has a PartitionedVariable,
# the GraphDef needs to include concat ops to get the value (or there'll
# be a lookup error on load).
check_collection_list = graph.get_all_collection_keys()
for collection_type in check_collection_list:
for element in graph.get_collection(collection_type):
if isinstance(element, variables.PartitionedVariable):
try:
graph.get_operation_by_name(element.name)
except KeyError:
# Create a concat op for this PartitionedVariable. The user may
# not need it, but we'll try looking it up on MetaGraph restore
# since it's in a collection.
element.as_tensor()
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
class BulkSaverBuilder(BaseSaverBuilder):
"""SaverBuilder with support for bulk restoring multiple saveables."""
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
# Ignored: bulk restore is internally sequential.
del restore_sequentially
restore_specs = []
for saveable in saveables:
for spec in saveable.specs:
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
names, slices, dtypes = zip(*restore_specs)
# Load all tensors onto CPU 0 for compatibility with existing code.
with ops.device("cpu:0"):
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
def _get_saver_or_default():
"""Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`Scaffold`, or `CheckpointSaverHook`.
Returns:
`Saver`.
Raises:
    RuntimeError: If the SAVERS collection already has more than one item.
"""
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if savers:
if len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
return savers[0]
saver = Saver(sharded=True, allow_empty=True)
  ops.add_to_collection(collection_key, saver)
return saver
@tf_export(v1=["train.Saver"])
class Saver(object):
"""Saves and restores variables.
See [Variables](https://tensorflow.org/guide/variables)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
no checkpoints are deleted from the filesystem but only the last one is
kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
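  For example (illustrative values; a sketch, not a recommendation):
  ```python
  # Keep the 10 most recent checkpoints, plus one for every 2 hours trained.
  saver = tf.compat.v1.train.Saver(max_to_keep=10,
                                   keep_checkpoint_every_n_hours=2)
  ```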
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.compat.v1.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.compat.v1.Session()
  for step in range(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
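  For example (hypothetical saver and path names):
  ```python
  saver0.save(sess, 'model-a', latest_filename='checkpoint-a')
  saver1.save(sess, 'model-b', latest_filename='checkpoint-b')
  ```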
"""
def __init__(self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=False,
write_version=saver_pb2.SaverDef.V2,
pad_step_number=False,
save_relative_paths=False,
filename=None):
"""Creates a `Saver`.
The constructor adds ops to save and restore variables.
`var_list` specifies the variables that will be saved and restored. It can
be passed as a `dict` or a list:
* A `dict` of names to variables: The keys are the names that will be
used to save or restore the variables in the checkpoint files.
* A list of variables: The variables will be keyed with their op name in
the checkpoint files.
For example:
```python
v1 = tf.Variable(..., name='v1')
v2 = tf.Variable(..., name='v2')
# Pass the variables as a dict:
saver = tf.compat.v1.train.Saver({'v1': v1, 'v2': v2})
# Or pass them as a list.
saver = tf.compat.v1.train.Saver([v1, v2])
# Passing a list is equivalent to passing a dict with the variable op names
# as keys:
saver = tf.compat.v1.train.Saver({v.op.name: v for v in [v1, v2]})
```
The optional `reshape` argument, if `True`, allows restoring a variable from
a save file where the variable had a different shape, but the same number
of elements and type. This is useful if you have reshaped a variable and
want to reload it from an older checkpoint.
The optional `sharded` argument, if `True`, instructs the saver to shard
checkpoints per device.
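    For example (a minimal sketch reusing `v1` and `v2` from above):
    ```python
    saver = tf.compat.v1.train.Saver([v1, v2], reshape=True, sharded=True)
    ```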
Args:
var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping
names to `SaveableObject`s. If `None`, defaults to the list of all
saveable objects.
reshape: If `True`, allows restoring parameters from a checkpoint where
the variables have a different shape.
sharded: If `True`, shard the checkpoints, one per device.
max_to_keep: Maximum number of recent checkpoints to keep. Defaults to 5.
keep_checkpoint_every_n_hours: How often to keep checkpoints. Defaults to
10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A `Bool`, which if true, causes restore of different
variables to happen sequentially within each device. This can lower
memory usage when restoring very large models.
saver_def: Optional `SaverDef` proto to use instead of running the
builder. This is only useful for specialty code that wants to recreate a
`Saver` object for a previously built `Graph` that had a `Saver`. The
`saver_def` proto should be the one returned by the `as_saver_def()`
call of the `Saver` that was created for that `Graph`.
builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
Defaults to `BulkSaverBuilder()`.
defer_build: If `True`, defer adding the save and restore ops to the
`build()` call. In that case `build()` should be called before
finalizing the graph or using the saver.
allow_empty: If `False` (default) raise an error if there are no variables
in the graph. Otherwise, construct the saver anyway and make it a no-op.
write_version: controls what format to use when saving checkpoints. It
also affects certain filepath matching logic. The V2 format is the
recommended choice: it is much more optimized than V1 in terms of memory
required and latency incurred during restore. Regardless of this
flag, the Saver is able to restore from both V2 and V1 checkpoints.
pad_step_number: if True, pads the global step number in the checkpoint
filepaths to some fixed width (8 by default). This is turned off by
default.
save_relative_paths: If `True`, will write relative paths to the
checkpoint state file. This is needed if the user wants to copy the
checkpoint directory and reload from the copied directory.
filename: If known at graph construction time, filename used for variable
loading/saving.
Raises:
TypeError: If `var_list` is invalid.
ValueError: If any of the keys or values in `var_list` are not unique.
      RuntimeError: If eager execution is enabled and `var_list` does not
        specify a list of variables to save.
@compatibility(eager)
When eager execution is enabled, `var_list` must specify a `list` or `dict`
of variables to save. Otherwise, a `RuntimeError` will be raised.
Although Saver works in some cases when executing eagerly, it is
fragile. Please switch to `tf.train.Checkpoint` or
`tf.keras.Model.save_weights`, which perform a more robust object-based
saving. These APIs will load checkpoints written by `Saver`.
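    For example (a minimal object-based sketch; `v1`/`v2` as above):
    ```python
    ckpt = tf.train.Checkpoint(v1=v1, v2=v2)
    save_path = ckpt.save('/tmp/ckpt')  # Later: ckpt.restore(save_path)
    ```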
@end_compatibility
"""
if defer_build and var_list:
raise ValueError(
"If `var_list` is provided then build cannot be deferred. "
"Either set defer_build=False or var_list=None.")
if context.executing_eagerly():
logging.warning(
"Saver is deprecated, please switch to tf.train.Checkpoint or "
"tf.keras.Model.save_weights for training checkpoints. When "
"executing eagerly variables do not necessarily have unique names, "
"and so the variable.name-based lookups Saver performs are "
"error-prone.")
if var_list is None:
raise RuntimeError(
"When eager execution is enabled, `var_list` must specify a list "
"or dict of variables to save")
self._var_list = var_list
self._reshape = reshape
self._sharded = sharded
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._name = name
self._restore_sequentially = restore_sequentially
self.saver_def = saver_def
self._builder = builder
self._is_built = False
self._allow_empty = allow_empty
self._is_empty = None
self._write_version = write_version
self._pad_step_number = pad_step_number
self._filename = filename
self._last_checkpoints = []
self._checkpoints_to_be_deleted = []
if context.executing_eagerly():
self._next_checkpoint_time = (
time.time() + self._keep_checkpoint_every_n_hours * 3600)
elif not defer_build:
self.build()
if self.saver_def:
self._check_saver_def()
self._write_version = self.saver_def.version
self._save_relative_paths = save_relative_paths
# For compatibility with object-based checkpoints, we may build a second
# Saver to read the renamed keys.
self._object_restore_saver = None
def build(self):
if context.executing_eagerly():
raise RuntimeError("Use save/restore instead of build in eager mode.")
self._build(self._filename, build_save=True, build_restore=True)
def _build_eager(self, checkpoint_path, build_save, build_restore):
self._build(
checkpoint_path, build_save=build_save, build_restore=build_restore)
def _build(self, checkpoint_path, build_save, build_restore):
"""Builds saver_def."""
if not context.executing_eagerly():
if self._is_built:
return
self._is_built = True
if not self.saver_def or context.executing_eagerly():
if self._builder is None:
self._builder = BulkSaverBuilder(self._write_version)
if self._var_list is None:
# pylint: disable=protected-access
self._var_list = variables._all_saveable_objects()
if not self._var_list:
if self._allow_empty:
self._is_empty = True
return
else:
raise ValueError("No variables to save")
self._is_empty = False
self.saver_def = self._builder._build_internal( # pylint: disable=protected-access
self._var_list,
reshape=self._reshape,
sharded=self._sharded,
max_to_keep=self._max_to_keep,
keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
name=self._name,
restore_sequentially=self._restore_sequentially,
filename=checkpoint_path,
build_save=build_save,
build_restore=build_restore)
elif self.saver_def and self._name:
# Since self._name is used as a name_scope by builder(), we are
# overloading the use of this field to represent the "import_scope" as
# well.
self.saver_def.filename_tensor_name = ops.prepend_name_scope(
self.saver_def.filename_tensor_name, self._name)
self.saver_def.save_tensor_name = ops.prepend_name_scope(
self.saver_def.save_tensor_name, self._name)
self.saver_def.restore_op_name = ops.prepend_name_scope(
self.saver_def.restore_op_name, self._name)
self._check_saver_def()
if not context.executing_eagerly():
# Updates next checkpoint time.
# Set in __init__ when executing eagerly.
self._next_checkpoint_time = (
time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
def _check_saver_def(self):
if not isinstance(self.saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must be a saver_pb2.SaverDef: %s" %
self.saver_def)
if not context.executing_eagerly():
if not self.saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s" %
str(self.saver_def))
if not self.saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s" %
str(self.saver_def))
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _RecordLastCheckpoint(self, latest_save_path):
"""Manages the list of the latest checkpoints."""
if not self.saver_def.max_to_keep:
return
# Remove first from list if the same name was used before.
for p in self._last_checkpoints:
if latest_save_path == self._CheckpointFilename(p):
self._last_checkpoints.remove(p)
# Append new path to list
self._last_checkpoints.append((latest_save_path, time.time()))
# If more than max_to_keep, remove oldest.
if len(self._last_checkpoints) > self.saver_def.max_to_keep:
self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))
def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix="meta"):
"""Deletes old checkpoints if necessary.
`self._checkpoints_to_be_deleted` is going to contain checkpoints that are
over `max_to_keep`. They are going to be deleted. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
if self._checkpoints_to_be_deleted:
p = self._checkpoints_to_be_deleted.pop(0)
      # Do not delete the file if keep_checkpoint_every_n_hours is set and we
      # have reached N hours of training.
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += (
self.saver_def.keep_checkpoint_every_n_hours * 3600)
return
# Otherwise delete the files.
try:
checkpoint_management.remove_checkpoint(
self._CheckpointFilename(p), self.saver_def.version,
meta_graph_suffix)
except Exception as e: # pylint: disable=broad-except
logging.warning("Ignoring: %s", str(e))
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return self.saver_def
def to_proto(self, export_scope=None):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaverDef` protocol buffer.
"""
if export_scope is None:
return self.saver_def
if not (self.saver_def.filename_tensor_name.startswith(export_scope) and
self.saver_def.save_tensor_name.startswith(export_scope) and
self.saver_def.restore_op_name.startswith(export_scope)):
return None
saver_def = saver_pb2.SaverDef()
saver_def.CopyFrom(self.saver_def)
saver_def.filename_tensor_name = ops.strip_name_scope(
saver_def.filename_tensor_name, export_scope)
saver_def.save_tensor_name = ops.strip_name_scope(
saver_def.save_tensor_name, export_scope)
saver_def.restore_op_name = ops.strip_name_scope(saver_def.restore_op_name,
export_scope)
return saver_def
@staticmethod
def from_proto(saver_def, import_scope=None):
"""Returns a `Saver` object created from `saver_def`.
Args:
saver_def: a `SaverDef` protocol buffer.
import_scope: Optional `string`. Name scope to use.
Returns:
A `Saver` built from saver_def.
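    Example (a round-trip sketch, assuming an existing `saver`):
    ```python
    saver_def = saver.to_proto()
    same_saver = tf.compat.v1.train.Saver.from_proto(saver_def)
    ```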
"""
return Saver(saver_def=saver_def, name=import_scope)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
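    For example (a sketch, assuming at least one checkpoint still exists):
    ```python
    # Restore the most recent checkpoint that has not been deleted.
    saver.restore(sess, saver.last_checkpoints[-1])
    ```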
"""
    return [self._CheckpointFilename(p) for p in self._last_checkpoints]
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
    # deleted. This is both safe and backwards compatible with a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
"""Sets the list of old checkpoint filenames and timestamps.
Args:
last_checkpoints_with_time: A list of tuples of checkpoint filenames and
timestamps.
Raises:
AssertionError: If last_checkpoints_with_time is not a list.
"""
assert isinstance(last_checkpoints_with_time, list)
self._last_checkpoints = last_checkpoints_with_time
def recover_last_checkpoints(self, checkpoint_paths):
"""Recovers the internal saver state after a crash.
This method is useful for recovering the "self._last_checkpoints" state.
Globs for the checkpoints pointed to by `checkpoint_paths`. If the files
exist, use their mtime as the checkpoint timestamp.
Args:
checkpoint_paths: a list of checkpoint paths.
"""
checkpoints_with_mtimes = []
for checkpoint_path in checkpoint_paths:
mtime = checkpoint_management.get_checkpoint_mtimes([checkpoint_path])
if mtime:
checkpoints_with_mtimes.append((checkpoint_path, mtime[0]))
self.set_last_checkpoints_with_time(checkpoints_with_mtimes)
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False,
save_debug_info=False):
# pylint: disable=line-too-long
"""Saves variables.
This method runs the ops added by the constructor for saving variables.
It requires a session in which the graph was launched. The variables to
save must also have been initialized.
The method returns the path prefix of the newly created checkpoint files.
This string can be passed directly to a call to `restore()`.
Args:
sess: A Session to use to save the variables.
save_path: String. Prefix of filenames created for the checkpoint.
global_step: If provided the global step number is appended to `save_path`
to create the checkpoint filenames. The optional argument can be a
`Tensor`, a `Tensor` name or an integer.
latest_filename: Optional name for the protocol buffer file that will
        contain the list of most recent checkpoints. That file, kept in the
same directory as the checkpoint files, is automatically managed by the
saver to keep track of recent checkpoints. Defaults to 'checkpoint'.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
write_meta_graph: `Boolean` indicating whether or not to write the meta
graph file.
write_state: `Boolean` indicating whether or not to write the
`CheckpointStateProto`.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
      save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
        which is in the same directory as save_path, with `_debug` added
        before the file extension. This is only enabled when
        `write_meta_graph` is `True`.
Returns:
A string: path prefix used for the checkpoint files. If the saver is
sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
is the number of shards created.
If the saver is empty, returns None.
Raises:
TypeError: If `sess` is not a `Session`.
ValueError: If `latest_filename` contains path components, or if it
collides with `save_path`.
RuntimeError: If save and restore ops weren't built.
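    Example (a minimal sketch with illustrative paths):
    ```python
    ckpt_prefix = saver.save(sess, '/tmp/my-model', global_step=step)
    # The returned prefix can be passed straight back to `restore()`:
    saver.restore(sess, ckpt_prefix)
    ```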
"""
# pylint: enable=line-too-long
if not self._is_built and not context.executing_eagerly():
raise RuntimeError(
"`build()` should be called before save if defer_build==True")
if latest_filename is None:
latest_filename = "checkpoint"
if self._write_version != saver_pb2.SaverDef.V2:
logging.warning("*******************************************************")
logging.warning("TensorFlow's V1 checkpoint format has been deprecated.")
logging.warning("Consider switching to the more efficient V2 format:")
logging.warning(" `tf.train.Saver(write_version=tf.train.SaverDef.V2)`")
logging.warning("now on by default.")
logging.warning("*******************************************************")
if os.path.split(latest_filename)[0]:
raise ValueError("'latest_filename' must not contain path components")
if global_step is not None:
if not isinstance(global_step, compat.integral_types):
global_step = training_util.global_step(sess, global_step)
checkpoint_file = "%s-%d" % (save_path, global_step)
if self._pad_step_number:
# Zero-pads the step numbers, so that they are sorted when listed.
checkpoint_file = "%s-%s" % (save_path, "{:08d}".format(global_step))
else:
checkpoint_file = save_path
if os.path.basename(save_path) == latest_filename and not self._sharded:
# Guard against collision between data file and checkpoint state file.
raise ValueError(
"'latest_filename' collides with 'save_path': '%s' and '%s'" %
(latest_filename, save_path))
if (not context.executing_eagerly() and
not isinstance(sess, session.SessionInterface)):
raise TypeError("'sess' must be a Session; %s" % sess)
save_path_parent = os.path.dirname(save_path)
if not self._is_empty:
try:
if context.executing_eagerly():
self._build_eager(
checkpoint_file, build_save=True, build_restore=False)
model_checkpoint_path = self.saver_def.save_tensor_name
else:
model_checkpoint_path = sess.run(
self.saver_def.save_tensor_name,
{self.saver_def.filename_tensor_name: checkpoint_file})
model_checkpoint_path = compat.as_str(model_checkpoint_path)
if write_state:
self._RecordLastCheckpoint(model_checkpoint_path)
checkpoint_management.update_checkpoint_state_internal(
save_dir=save_path_parent,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=self.last_checkpoints,
latest_filename=latest_filename,
save_relative_paths=self._save_relative_paths)
self._MaybeDeleteOldCheckpoints(meta_graph_suffix=meta_graph_suffix)
except (errors.FailedPreconditionError, errors.NotFoundError) as exc:
if not gfile.IsDirectory(save_path_parent):
exc = ValueError(
"Parent directory of {} doesn't exist, can't save.".format(
save_path))
raise exc
if write_meta_graph:
meta_graph_filename = checkpoint_management.meta_graph_filename(
checkpoint_file, meta_graph_suffix=meta_graph_suffix)
if not context.executing_eagerly():
with sess.graph.as_default():
self.export_meta_graph(
meta_graph_filename,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info)
if self._is_empty:
return None
else:
return model_checkpoint_path
def export_meta_graph(self,
filename=None,
collection_list=None,
as_text=False,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False):
# pylint: disable=line-too-long
"""Writes `MetaGraphDef` to save_path/filename.
Args:
filename: Optional meta_graph filename including the path.
collection_list: List of string keys to collect.
as_text: If `True`, writes the meta_graph as an ASCII proto.
export_scope: Optional `string`. Name scope to remove.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated with
this Saver.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
      save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
        which is in the same directory as filename and with `_debug` added
        before the file extension.
Returns:
A `MetaGraphDef` proto.
"""
# pylint: enable=line-too-long
return export_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
saver_def=self.saver_def,
collection_list=collection_list,
as_text=as_text,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info)
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters. None in eager mode.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If save_path is None or not a valid checkpoint.
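    A minimal sketch of the restore side, assuming the same graph has been
    rebuilt and checkpoints were written under an illustrative `./ckpt`
    directory:
    ```python
    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session() as sess:
      # latest_checkpoint() returns the prefix of the most recent save.
      saver.restore(sess, tf.train.latest_checkpoint('./ckpt'))
    ```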
"""
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
if not checkpoint_management.checkpoint_exists(compat.as_text(save_path)):
raise ValueError("The passed save_path is not a valid checkpoint: " +
compat.as_text(save_path))
logging.info("Restoring parameters from %s", compat.as_text(save_path))
try:
if context.executing_eagerly():
self._build_eager(save_path, build_save=False, build_restore=True)
else:
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
except errors.NotFoundError as err:
      # There are three common conditions that might cause this error:
      # 0. The file is missing. We ignore this case here since it is checked
      #    above.
      # 1. This is an object-based checkpoint being loaded with a name-based
      #    saver.
      # 2. The graph has been altered and a variable or other name is missing.

      # Case 1: the checkpoint would not load successfully as is, so try to
      # parse it as an object-based checkpoint.
try:
names_to_keys = object_graph_key_mapping(save_path)
except errors.NotFoundError:
# 2. This is not an object-based checkpoint, which likely means there
# is a graph mismatch. Re-raise the original error with
# a helpful message (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a Variable name or other graph key that is missing")
# This is an object-based checkpoint. We'll print a warning and then do
# the restore.
logging.warning(
"Restoring an object-based checkpoint using a name-based saver. This "
"may be somewhat fragile, and will re-build the Saver. Instead, "
"consider loading object-based checkpoints using "
"tf.train.Checkpoint().")
self._object_restore_saver = saver_from_object_based_checkpoint(
checkpoint_path=save_path,
var_list=self._var_list,
builder=self._builder,
names_to_keys=names_to_keys,
cached_saver=self._object_restore_saver)
self._object_restore_saver.restore(sess=sess, save_path=save_path)
except errors.InvalidArgumentError as err:
# There is a mismatch between the graph and the checkpoint being loaded.
# We add a more reasonable error message here to help users (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a mismatch between the current graph and the graph")
@staticmethod
def _add_collection_def(meta_graph_def, key, export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
export_scope: Optional `string`. Name scope to remove.
"""
meta_graph.add_collection_def(
meta_graph_def, key, export_scope=export_scope)
@tf_export(v1=["train.import_meta_graph"])
def import_meta_graph(meta_graph_or_file,
clear_devices=False,
import_scope=None,
**kwargs):
"""Recreates a Graph saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
```Python
...
# Create a saver.
saver = tf.compat.v1.train.Saver(...variables...)
# Remember the training_op we want to run by adding it to a collection.
tf.compat.v1.add_to_collection('train_op', train_op)
sess = tf.compat.v1.Session()
  for step in range(1000000):
sess.run(train_op)
if step % 1000 == 0:
# Saves checkpoint, which by default also exports a meta_graph
# named 'my-model-global_step.meta'.
saver.save(sess, 'my-model', global_step=step)
```
Later we can continue training from this saved `meta_graph` without building
the model from scratch.
```Python
  with tf.compat.v1.Session() as sess:
    new_saver = tf.compat.v1.train.import_meta_graph(
        'my-save-dir/my-model-10000.meta')
    new_saver.restore(sess, 'my-save-dir/my-model-10000')
    # tf.compat.v1.get_collection() returns a list. In this example we only
    # want the first one.
    train_op = tf.compat.v1.get_collection('train_op')[0]
    for step in range(1000000):
      sess.run(train_op)
```
NOTE: Restarting training from saved `meta_graph` only works if the
device assignments have not changed.
Example:
Variables, placeholders, and independent operations can also be stored, as
shown in the following example.
```Python
# Saving contents and operations.
v1 = tf.placeholder(tf.float32, name="v1")
v2 = tf.placeholder(tf.float32, name="v2")
v3 = tf.math.multiply(v1, v2)
vx = tf.Variable(10.0, name="vx")
v4 = tf.add(v3, vx, name="v4")
saver = tf.train.Saver([vx])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(vx.assign(tf.add(vx, vx)))
result = sess.run(v4, feed_dict={v1:12.0, v2:3.3})
print(result)
saver.save(sess, "./model_ex1")
```
Later this model can be restored and contents loaded.
```Python
# Restoring variables and running operations.
saver = tf.train.import_meta_graph("./model_ex1.meta")
sess = tf.Session()
saver.restore(sess, "./model_ex1")
result = sess.run("v4:0", feed_dict={"v1:0": 12.0, "v2:0": 3.3})
print(result)
```
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during import.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
**kwargs: Optional keyed arguments.
Returns:
A saver constructed from `saver_def` in `MetaGraphDef` or None.
A None value is returned if no variables exist in the `MetaGraphDef`
(i.e., there are no variables to restore).
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported. No graph exists when eager
execution is enabled.
@end_compatibility
""" # pylint: disable=g-doc-exception
return _import_meta_graph_with_return_elements(meta_graph_or_file,
clear_devices, import_scope,
**kwargs)[0]
def _import_meta_graph_with_return_elements(meta_graph_or_file,
clear_devices=False,
import_scope=None,
return_elements=None,
**kwargs):
"""Import MetaGraph, and return both a saver and returned elements."""
if context.executing_eagerly():
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)
else:
meta_graph_def = meta_graph_or_file
imported_vars, imported_return_elements = (
meta_graph.import_scoped_meta_graph_with_return_elements(
meta_graph_def,
clear_devices=clear_devices,
import_scope=import_scope,
return_elements=return_elements,
**kwargs))
saver = _create_saver_from_imported_meta_graph(meta_graph_def, import_scope,
imported_vars)
return saver, imported_return_elements
def _create_saver_from_imported_meta_graph(meta_graph_def, import_scope,
imported_vars):
"""Return a saver for restoring variable values to an imported MetaGraph."""
if meta_graph_def.HasField("saver_def"):
# Infer the scope that is prepended by `import_scoped_meta_graph`.
scope = import_scope
var_names = list(imported_vars.keys())
if var_names:
sample_key = var_names[0]
sample_var = imported_vars[sample_key]
scope = sample_var.name[:-len(sample_key)]
return Saver(saver_def=meta_graph_def.saver_def, name=scope)
else:
if variables._all_saveable_objects(scope=import_scope): # pylint: disable=protected-access
# Return the default saver instance for all graph variables.
return Saver()
else:
# If no graph variables exist, then a Saver cannot be constructed.
logging.info("Saver not created because there are no variables in the"
" graph to restore")
return None
@tf_export(v1=["train.export_meta_graph"])
def export_meta_graph(filename=None,
meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
as_text=False,
graph=None,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False,
save_debug_info=False,
**kwargs):
# pylint: disable=line-too-long
"""Returns `MetaGraphDef` proto.
Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the generated
`MetaGraphDef` protocol buffer.
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
graph: The `Graph` to export. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract the
      subgraph. The scope name will be stripped from the node definitions for
easy import later into new name scopes. If `None`, the whole graph is
exported. graph_def and export_scope cannot both be specified.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the graph
(both Save/Restore ops and SaverDefs) that are not associated with the
provided SaverDef.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
    save_debug_info: If `True`, save the GraphDebugInfo to a separate file,
      which is in the same directory as filename and with `_debug` added
      before the file extension.
**kwargs: Optional keyed arguments.
Returns:
A `MetaGraphDef` proto.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported unless both `graph_def` and
`graph` are provided. No graph exists when eager execution is enabled.
@end_compatibility
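  As a sketch, exporting the default graph to an illustrative `./graph.meta`
  file:
  ```python
  with tf.Graph().as_default():
    v = tf.compat.v1.get_variable('v', shape=[1])
    # Writes ./graph.meta and also returns the MetaGraphDef proto.
    meta = tf.compat.v1.train.export_meta_graph(filename='./graph.meta')
  ```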
"""
# pylint: enable=line-too-long
if context.executing_eagerly() and not (graph_def is not None and
graph is not None):
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
filename=filename,
meta_info_def=meta_info_def,
graph_def=graph_def,
saver_def=saver_def,
collection_list=collection_list,
as_text=as_text,
graph=graph,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs,
save_debug_info=save_debug_info,
**kwargs)
return meta_graph_def
def _wrap_restore_error_with_msg(err, extra_verbiage):
err_msg = ("Restoring from checkpoint failed. This is most likely "
"due to {} from the checkpoint. Please ensure that you "
"have not altered the graph expected based on the checkpoint. "
"Original error:\n\n{}").format(extra_verbiage, err.message)
return err.__class__(err.node_def, err.op, err_msg)
ops.register_proto_function(
ops.GraphKeys.SAVERS,
proto_type=saver_pb2.SaverDef,
to_proto=Saver.to_proto,
from_proto=Saver.from_proto)
def object_graph_key_mapping(checkpoint_path):
"""Return name to key mappings from the checkpoint.
Args:
checkpoint_path: string, path to object-based checkpoint
Returns:
Dictionary mapping tensor names to checkpoint keys.
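  A sketch with a hypothetical object-based checkpoint prefix; the returned
  dict maps plain tensor names to object-graph checkpoint keys, roughly:
  ```python
  names_to_keys = object_graph_key_mapping('./obj_ckpt-1')
  # e.g. {'dense/kernel': 'dense/kernel/.ATTRIBUTES/VALUE', ...}
  ```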
"""
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
object_graph_string = reader.get_tensor(trackable.OBJECT_GRAPH_PROTO_KEY)
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
names_to_keys = {}
for node in object_graph_proto.nodes:
for attribute in node.attributes:
names_to_keys[attribute.full_name] = attribute.checkpoint_key
return names_to_keys
def saver_from_object_based_checkpoint(checkpoint_path,
var_list=None,
builder=None,
names_to_keys=None,
cached_saver=None):
"""Return a `Saver` which reads from an object-based checkpoint.
This function validates that all variables in the variables list are remapped
in the object-based checkpoint (or `names_to_keys` dict if provided). A
saver will be created with the list of remapped variables.
The `cached_saver` argument allows the user to pass in a previously created
saver, so multiple `saver.restore()` calls don't pollute the graph when graph
building. This assumes that keys are consistent, meaning that the
1) `checkpoint_path` checkpoint, and
2) checkpoint used to create the `cached_saver`
are the same type of object-based checkpoint. If this argument is set, this
function will simply validate that all variables have been remapped by the
checkpoint at `checkpoint_path`.
Note that in general, `tf.train.Checkpoint` should be used to restore/save an
object-based checkpoint.
Args:
checkpoint_path: string, path to object-based checkpoint
var_list: list of `Variables` that appear in the checkpoint. If `None`,
`var_list` will be set to all saveable objects.
builder: a `BaseSaverBuilder` instance. If `None`, a new `BulkSaverBuilder`
will be created.
    names_to_keys: dict mapping string tensor names to checkpoint keys. If
`None`, this dict will be generated from the checkpoint file.
cached_saver: Cached `Saver` object with remapped variables.
Returns:
`Saver` with remapped variables for reading from an object-based checkpoint.
Raises:
    ValueError: If the checkpoint provided is not an object-based checkpoint.
NotFoundError: If one of the variables in `var_list` can not be found in the
checkpoint. This could mean the checkpoint or `names_to_keys` mapping is
missing the variable.
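  A usage sketch, assuming `./obj_ckpt-1` is a hypothetical prefix written by
  `tf.train.Checkpoint(...).save()` and the current graph defines matching
  variables:
  ```python
  saver = saver_from_object_based_checkpoint('./obj_ckpt-1')
  with tf.compat.v1.Session() as sess:
    saver.restore(sess, './obj_ckpt-1')
  ```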
"""
if names_to_keys is None:
try:
names_to_keys = object_graph_key_mapping(checkpoint_path)
except errors.NotFoundError:
raise ValueError("Checkpoint in %s not an object-based checkpoint." %
checkpoint_path)
if var_list is None:
var_list = variables._all_saveable_objects() # pylint: disable=protected-access
if builder is None:
builder = BulkSaverBuilder()
saveables = saveable_object_util.validate_and_slice_inputs(var_list)
current_names = set()
for saveable in saveables:
for spec in saveable.specs:
current_names.add(spec.name)
previous_names = set(names_to_keys.keys())
missing_names = current_names - previous_names
if missing_names:
extra_names = previous_names - current_names
intersecting_names = previous_names.intersection(current_names)
raise errors.NotFoundError(
None,
None,
message=(
"\n\nExisting variables not in the checkpoint: %s\n\n"
"Variables names when this checkpoint was written which don't "
"exist now: %s\n\n"
"(%d variable name(s) did match)\n\n"
"Could not find some variables in the checkpoint (see names "
"above). Saver was attempting to load an object-based checkpoint "
"(saved using tf.train.Checkpoint or tf.keras.Model.save_weights) "
"using variable names. If the checkpoint was written with eager "
"execution enabled, it's possible that variable names have "
"changed (for example missing a '_1' suffix). It's also "
"possible that there are new variables which did not exist "
"when the checkpoint was written. You can construct a "
"Saver(var_list=...) with only the variables which previously "
"existed, and if variable names have changed you may need to "
"make this a dictionary with the old names as keys. If you're "
"using an Estimator, you'll need to return a tf.train.Saver "
"inside a tf.train.Scaffold from your model_fn.") %
(", ".join(sorted(missing_names)), ", ".join(
sorted(extra_names)), len(intersecting_names)))
for saveable in saveables:
for spec in saveable.specs:
spec.name = names_to_keys[spec.name]
if cached_saver is None:
return Saver(saveables)
return cached_saver
|
tensorflow-master
|
tensorflow/python/training/saver.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped quantize training rewriting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_module
class PywrapQuantizeTrainingTest(test.TestCase):
# Mainly to verify the python interface is working.
# More tests for this function can be found in the related c++ tests.
def testQuantizeTraining(self):
with session.Session() as sess:
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
      self.assertEqual(c.eval(), 42.0)
      self.assertEqual(len(sess.graph_def.node), 3)
result = pywrap_tensorflow.do_quantize_training_on_graphdef(
sess.graph_def, 8)
# We just want to guarantee that some rewrite happened.
self.assertGreater(len(result.node), 3)
# Test that save/restoring works for EMA variables generated in the
# quantized training rewrite.
@test_util.run_v1_only('b/120545219')
def testQuantizedSaveRestore(self):
save_path = os.path.join(self.get_temp_dir(), 'quantized_save_restore')
g = ops.Graph()
with session.Session(graph=g) as sess:
a = constant_op.constant(6.0, shape=[1, 1], name='a')
b = variables.VariableV1(
constant_op.constant(7.0, shape=[1, 1]), name='b')
c = math_ops.matmul(a, b, name='matmul')
init_op = variables.global_variables_initializer()
saver = saver_module.Saver({'b': b})
result = pywrap_tensorflow.do_quantize_training_on_graphdef(
sess.graph_def, 8)
with ops.Graph().as_default() as g, session.Session(graph=g) as sess:
_ = importer.import_graph_def(result, name='')
# Initialize the variable.
self.evaluate(g.get_operation_by_name(init_op.name))
# Run the graph for one step to assign values to the quantization min/max
# variables.
self.evaluate(g.get_tensor_by_name(c.name))
saver.save(sess, save_path)
with ops.Graph().as_default() as g, session.Session(graph=g) as sess:
_ = importer.import_graph_def(result, name='')
      # When we restore the saved variables, the quantization variables should
# be restored as well.
saver.restore(sess, save_path)
      self.assertEqual(7.0, sess.run(g.get_tensor_by_name('b:0')))
      self.assertEqual(6.0, sess.run(g.get_tensor_by_name('a/Min/Variable:0')))
      self.assertEqual(6.0, sess.run(g.get_tensor_by_name('a/Max/Variable:0')))
      self.assertEqual(7.0,
                       sess.run(g.get_tensor_by_name('b/read/Min/Variable:0')))
      self.assertEqual(7.0,
                       sess.run(g.get_tensor_by_name('b/read/Max/Variable:0')))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/quantize_training_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to work with checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"load_checkpoint", "load_variable", "list_variables",
"checkpoints_iterator", "init_from_checkpoint"
]
@tf_export("train.load_checkpoint")
def load_checkpoint(ckpt_dir_or_file):
"""Returns `CheckpointReader` for checkpoint found in `ckpt_dir_or_file`.
If `ckpt_dir_or_file` resolves to a directory with multiple checkpoints,
  a reader for the latest checkpoint is returned.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint
file.
Returns:
`CheckpointReader` object.
Raises:
ValueError: If `ckpt_dir_or_file` resolves to a directory with no
checkpoints.
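  For instance, a sketch that inspects a checkpoint under an illustrative
  `./ckpt` directory:
  ```python
  reader = tf.train.load_checkpoint('./ckpt')
  # Maps variable names in the checkpoint to their shapes.
  shape_map = reader.get_variable_to_shape_map()
  ```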
"""
filename = _get_checkpoint_filename(ckpt_dir_or_file)
if filename is None:
raise ValueError("Couldn't find 'checkpoint' file or checkpoints in "
"given directory %s" % ckpt_dir_or_file)
return pywrap_tensorflow.NewCheckpointReader(filename)
@tf_export("train.load_variable")
def load_variable(ckpt_dir_or_file, name):
"""Returns the tensor value of the given variable in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
name: Name of the variable to return.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
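  A sketch (the directory and the variable name are illustrative):
  ```python
  kernel = tf.train.load_variable('./ckpt', 'dense/kernel')
  ```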
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
name = name[:-2]
reader = load_checkpoint(ckpt_dir_or_file)
return reader.get_tensor(name)
@tf_export("train.list_variables")
def list_variables(ckpt_dir_or_file):
"""Returns list of all variables in the checkpoint.
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
Returns:
List of tuples `(name, shape)`.
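  A sketch that prints every variable and its shape (directory illustrative):
  ```python
  for name, shape in tf.train.list_variables('./ckpt'):
    print(name, shape)
  ```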
"""
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
names = sorted(variable_map.keys())
result = []
for name in names:
result.append((name, variable_map[name]))
return result
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
"""Waits until a new checkpoint file is found.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
last_checkpoint: The last checkpoint path used or `None` if we're expecting
a checkpoint for the first time.
seconds_to_sleep: The number of seconds to sleep for before looking for a
new checkpoint.
timeout: The maximum number of seconds to wait. If left as `None`, then the
process will wait indefinitely.
Returns:
a new checkpoint path, or None if the timeout was reached.
"""
logging.info("Waiting for new checkpoint at %s", checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info("Found new checkpoint at %s", checkpoint_path)
return checkpoint_path
@tf_export("train.checkpoints_iterator")
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
"""Continuously yield new checkpoint files as they appear.
The iterator only checks for new checkpoints when control flow has been
reverted to it. This means it can miss checkpoints if your code takes longer
to run between iterations than `min_interval_secs` or the interval at which
new checkpoints are written.
The `timeout` argument is the maximum number of seconds to block waiting for
a new checkpoint. It is used in combination with the `timeout_fn` as
follows:
* If the timeout expires and no `timeout_fn` was specified, the iterator
stops yielding.
* If a `timeout_fn` was specified, that function is called and if it returns
a true boolean value the iterator stops yielding.
* If the function returns a false boolean value then the iterator resumes the
wait for new checkpoints. At this point the timeout logic applies again.
This behavior gives control to callers on what to do if checkpoints do not
come fast enough or stop being generated. For example, if callers have a way
to detect that the training has stopped and know that no new checkpoints
will be generated, they can provide a `timeout_fn` that returns `True` when
the training has stopped. If they know that the training is still going on
they return `False` instead.
Args:
checkpoint_dir: The directory in which checkpoints are saved.
min_interval_secs: The minimum number of seconds between yielding
checkpoints.
timeout: The maximum number of seconds to wait between checkpoints. If left
as `None`, then the process will wait indefinitely.
timeout_fn: Optional function to call after a timeout. If the function
returns True, then it means that no new checkpoints will be generated and
the iterator will exit. The function is called with no arguments.
Yields:
String paths to latest checkpoint files as they arrive.
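  For example, an evaluation loop that stops once checkpoints are ten minutes
  overdue and a hypothetical `_training_finished()` predicate returns True
  (the directory and the timeout value are illustrative):
  ```python
  def _training_finished():
    return False  # Hypothetical: return True when training is known done.

  for path in tf.train.checkpoints_iterator(
      './ckpt', timeout=600, timeout_fn=_training_finished):
    print('Evaluating', path)
  ```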
"""
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
# timed out
logging.info("Timed-out waiting for a checkpoint.")
return
if timeout_fn():
# The timeout_fn indicated that we are truly done.
return
else:
# The timeout_fn indicated that more checkpoints may come.
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
@tf_export(v1=["train.init_from_checkpoint"])
def init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""Replaces `tf.Variable` initializers so they load from a checkpoint file.
Values are not loaded immediately, but when the initializer is run
(typically by running a `tf.compat.v1.global_variables_initializer` op).
Note: This overrides default initialization ops of specified variables and
redefines dtype.
Assignment map supports following syntax:
* `'checkpoint_scope_name/': 'scope_name/'` - will load all variables in
current `scope_name` from `checkpoint_scope_name` with matching tensor
names.
* `'checkpoint_scope_name/some_other_variable': 'scope_name/variable_name'` -
will initialize `scope_name/variable_name` variable
from `checkpoint_scope_name/some_other_variable`.
* `'scope_variable_name': variable` - will initialize given `tf.Variable`
object with tensor 'scope_variable_name' from the checkpoint.
* `'scope_variable_name': list(variable)` - will initialize list of
partitioned variables with tensor 'scope_variable_name' from the checkpoint.
* `'/': 'scope_name/'` - will load all variables in current `scope_name` from
    checkpoint's root (i.e., no scope).
Supports loading into partitioned variables, which are represented as
`'<variable>/part_<part #>'`.
Example:
```python
# Say, '/tmp/model.ckpt' has the following tensors:
# -- name='old_scope_1/var1', shape=[20, 2]
# -- name='old_scope_1/var2', shape=[50, 4]
# -- name='old_scope_2/var3', shape=[100, 100]
# Create new model's variables
with tf.compat.v1.variable_scope('new_scope_1'):
var1 = tf.compat.v1.get_variable('var1', shape=[20, 2],
initializer=tf.compat.v1.zeros_initializer())
with tf.compat.v1.variable_scope('new_scope_2'):
var2 = tf.compat.v1.get_variable('var2', shape=[50, 4],
initializer=tf.compat.v1.zeros_initializer())
# Partition into 5 variables along the first axis.
var3 = tf.compat.v1.get_variable(name='var3', shape=[100, 100],
initializer=tf.compat.v1.zeros_initializer(),
partitioner=lambda shape, dtype: [5, 1])
# Initialize all variables in `new_scope_1` from `old_scope_1`.
init_from_checkpoint('/tmp/model.ckpt', {'old_scope_1/': 'new_scope_1'})
# Use names to specify which variables to initialize from checkpoint.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': 'new_scope_1/var1',
'old_scope_1/var2': 'new_scope_2/var2'})
# Or use tf.Variable objects to identify what to initialize.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_1/var1': var1,
'old_scope_1/var2': var2})
# Initialize partitioned variables using variable's name
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': 'new_scope_2/var3'})
# Or specify the list of tf.Variable objects.
init_from_checkpoint('/tmp/model.ckpt',
{'old_scope_2/var3': var3._get_variable_list()})
```
Args:
ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.
assignment_map: Dict, where keys are names of the variables in the
checkpoint and values are current variables or names of current variables
(in default graph).
Raises:
ValueError: If missing variables in current graph, or if missing
checkpoints or tensors in checkpoints.
"""
init_from_checkpoint_fn = lambda _: _init_from_checkpoint(
ckpt_dir_or_file, assignment_map)
if distribution_strategy_context.get_cross_replica_context():
init_from_checkpoint_fn(None)
else:
distribution_strategy_context.get_replica_context().merge_call(
init_from_checkpoint_fn)
def _init_from_checkpoint(ckpt_dir_or_file, assignment_map):
"""See `init_from_checkpoint` for documentation."""
ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
reader = load_checkpoint(ckpt_dir_or_file)
variable_map = reader.get_variable_to_shape_map()
for tensor_name_in_ckpt, current_var_or_name in sorted(
six.iteritems(assignment_map)):
var = None
# Check if this is Variable object or list of Variable objects (in case of
# partitioned variables).
if _is_variable(current_var_or_name) or (
isinstance(current_var_or_name, list)
and all(_is_variable(v) for v in current_var_or_name)):
var = current_var_or_name
else:
store_vars = vs._get_default_variable_store()._vars # pylint:disable=protected-access
# Check if this variable is in var_store.
var = store_vars.get(current_var_or_name, None)
# Also check if variable is partitioned as list.
if var is None:
var = _collect_partitioned_variable(current_var_or_name, store_vars)
if var is not None:
# If 1 to 1 mapping was provided, find variable in the checkpoint.
if tensor_name_in_ckpt not in variable_map:
raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
))
if _is_variable(var):
# Additional at-call-time checks.
if not var.get_shape().is_compatible_with(
variable_map[tensor_name_in_ckpt]):
raise ValueError(
"Shape of variable %s (%s) doesn't match with shape of "
"tensor %s (%s) from checkpoint reader." % (
var.name, str(var.get_shape()),
tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
))
var_name = var.name
else:
var_name = ",".join([v.name for v in var])
_set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
else:
scopes = ""
# TODO(vihanjain): Support list of 'current_var_or_name' here.
if "/" in current_var_or_name:
scopes = current_var_or_name[:current_var_or_name.rindex("/")]
if not tensor_name_in_ckpt.endswith("/"):
raise ValueError(
"Assignment map with scope only name {} should map to scope only "
"{}. Should be 'scope/': 'other_scope/'.".format(
scopes, tensor_name_in_ckpt))
# If scope to scope mapping was provided, find all variables in the scope
# and create variable to variable mapping.
scope_variables = set()
for var_name in store_vars:
if not scopes or var_name.startswith(scopes + "/"):
# Consume /part_ if partitioned variable.
if "/part_" in var_name:
var_name = var_name[:var_name.index("/part_")]
scope_variables.add(var_name)
for var_name in sorted(scope_variables):
# Lookup name with specified prefix and suffix from current variable.
# If tensor_name given is '/' (root), don't use it for full name.
full_tensor_name = var_name[len(scopes):]
if current_var_or_name != "/":
full_tensor_name = full_tensor_name[1:]
if tensor_name_in_ckpt != "/":
full_tensor_name = tensor_name_in_ckpt + full_tensor_name
# Remove trailing '/', if any, in the full_tensor_name
if full_tensor_name.endswith("/"):
full_tensor_name = full_tensor_name[:-1]
if full_tensor_name not in variable_map:
raise ValueError(
"Tensor %s (%s in %s) is not found in %s checkpoint" % (
full_tensor_name, var_name[len(scopes) + 1:],
tensor_name_in_ckpt, ckpt_dir_or_file
))
var = store_vars.get(var_name, None)
if var is None:
var = _collect_partitioned_variable(var_name, store_vars)
_set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
logging.debug("Initialize variable %s from checkpoint %s with %s",
var_name, ckpt_dir_or_file, full_tensor_name)
def _get_checkpoint_filename(ckpt_dir_or_file):
"""Returns checkpoint filename given directory or specific checkpoint file."""
if gfile.IsDirectory(ckpt_dir_or_file):
return checkpoint_management.latest_checkpoint(ckpt_dir_or_file)
return ckpt_dir_or_file
def _set_checkpoint_initializer(variable,
ckpt_file,
tensor_name,
slice_spec,
name="checkpoint_initializer"):
"""Overrides given variable's initialization op.
  Sets the variable initializer to an assign op that initializes the variable
  from the tensor's value in the checkpoint.
Args:
variable: `tf.Variable` object.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
slice_spec: Slice specification for loading partitioned tensors.
name: Name of the operation.
"""
base_type = variable.dtype.base_dtype
# Do not colocate with variable since RestoreV2 op only runs on CPU and
# colocation will force variable (and other ops that colocate with variable)
# to be on CPU as well. It is okay to place the variable's initializer op on
# CPU since it will only be run once at the start.
with ops.device(variable.device), ops.device("/cpu:0"):
restore_op = io_ops.restore_v2(
ckpt_file, [tensor_name], [slice_spec], [base_type], name=name)[0]
names_to_saveables = saveable_object_util.op_list_to_dict([variable])
saveable_objects = []
for name, op in names_to_saveables.items():
for s in saveable_object_util.saveable_objects_for_op(op, name):
saveable_objects.append(s)
assert len(saveable_objects) == 1 # Should be only one variable.
init_op = saveable_objects[0].restore([restore_op], restored_shapes=None)
# pylint:disable=protected-access
variable._initializer_op = init_op
restore_op.set_shape(variable.shape)
variable._initial_value = restore_op
# pylint:enable=protected-access
def _set_variable_or_list_initializer(variable_or_list, ckpt_file,
tensor_name):
"""Overrides initialization op of given variable or list of variables.
Calls `_set_checkpoint_initializer` for each variable in the given list of
variables.
Args:
variable_or_list: `tf.Variable` object or a list of `tf.Variable` objects.
ckpt_file: string, full path of the checkpoint.
tensor_name: Name of the tensor to load from the checkpoint.
Raises:
ValueError: if all objects in `variable_or_list` are not partitions of the
same large variable.
"""
if isinstance(variable_or_list, (list, tuple)):
# A set of slices.
slice_name = None
for v in variable_or_list:
slice_info = v._save_slice_info # pylint:disable=protected-access
if slice_name is None:
slice_name = slice_info.full_name
elif slice_name != slice_info.full_name:
raise ValueError("Slices must all be from the same tensor: %s != %s" %
(slice_name, slice_info.full_name))
_set_checkpoint_initializer(v, ckpt_file, tensor_name, slice_info.spec)
else:
_set_checkpoint_initializer(variable_or_list, ckpt_file, tensor_name, "")
def _is_variable(x):
return (isinstance(x, variables.Variable) or
resource_variable_ops.is_resource_variable(x))
def _collect_partitioned_variable(name, all_vars):
"""Returns list of `tf.Variable` that comprise the partitioned variable."""
if name + "/part_0" in all_vars:
var = []
i = 0
while name + "/part_%d" % i in all_vars:
var.append(all_vars[name + "/part_%d" % i])
i += 1
return var
return None
|
tensorflow-master
|
tensorflow/python/training/checkpoint_utils.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reads Summaries from and writes Summaries to event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.summary.summary_iterator import summary_iterator
from tensorflow.python.summary.writer.writer import FileWriter as _FileWriter
from tensorflow.python.summary.writer.writer_cache import FileWriterCache as SummaryWriterCache
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
class SummaryWriter(_FileWriter):
@deprecated("2016-11-30",
"Please switch to tf.summary.FileWriter. The interface and "
"behavior is the same; this is just a rename.")
def __init__(self,
logdir,
graph=None,
max_queue=10,
flush_secs=120,
graph_def=None):
"""Creates a `SummaryWriter` and an event file.
This class is deprecated, and should be replaced with tf.summary.FileWriter.
On construction the summary writer creates a new event file in `logdir`.
This event file will contain `Event` protocol buffers constructed when you
call one of the following functions: `add_summary()`, `add_session_log()`,
`add_event()`, or `add_graph()`.
If you pass a `Graph` to the constructor it is added to
the event file. (This is equivalent to calling `add_graph()` later).
TensorBoard will pick the graph from the file and display it graphically so
you can interactively explore the graph you built. You will usually pass
the graph from the session in which you launched it:
```python
...create a graph...
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Create a summary writer, add the 'graph' to the event file.
writer = tf.compat.v1.summary.FileWriter(<some-directory>, sess.graph)
```
The other arguments to the constructor control the asynchronous writes to
the event file:
* `flush_secs`: How often, in seconds, to flush the added summaries
and events to disk.
* `max_queue`: Maximum number of summaries or events pending to be
      written to disk before one of the 'add' calls blocks.
Args:
logdir: A string. Directory where event file will be written.
graph: A `Graph` object, such as `sess.graph`.
max_queue: Integer. Size of the queue for pending events and summaries.
flush_secs: Number. How often, in seconds, to flush the
pending events and summaries to disk.
graph_def: DEPRECATED: Use the `graph` argument instead.
"""
super(SummaryWriter, self).__init__(logdir, graph, max_queue, flush_secs,
graph_def)
|
tensorflow-master
|
tensorflow/python/training/summary_io.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adam for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdamOptimizer"])
class AdamOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adam algorithm.
See [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
use_locking=False,
name="Adam"):
r"""Construct a new Adam optimizer.
Initialization:
$$m_0 := 0 \text{(Initialize initial 1st moment vector)}$$
$$v_0 := 0 \text{(Initialize initial 2nd moment vector)}$$
$$t := 0 \text{(Initialize timestep)}$$
The update rule for `variable` with gradient `g` uses an optimization
described at the end of section 2 of the paper:
$$t := t + 1$$
$$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$
$$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$
$$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$
$$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$
The default value of 1e-8 for epsilon might not be a good default in
general. For example, when training an Inception network on ImageNet a
current good choice is 1.0 or 0.1. Note that since AdamOptimizer uses the
formulation just before Section 2.1 of the Kingma and Ba paper rather than
the formulation in Algorithm 1, the "epsilon" referred to here is "epsilon
hat" in the paper.
The sparse implementation of this algorithm (used when the gradient is an
IndexedSlices object, typically because of `tf.gather` or an embedding
lookup in the forward pass) does apply momentum to variable slices even if
they were not used in the forward pass (meaning they have a gradient equal
to zero). Momentum decay (beta1) is also applied to the entire momentum
accumulator. This means that the sparse behavior is equivalent to the dense
behavior (in contrast to some momentum implementations which ignore momentum
unless a variable slice was actually used).
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
beta1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta2: A float value or a constant float tensor. The exponential decay
rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
Defaults to "Adam". @compatibility(eager) When eager execution is
enabled, `learning_rate`, `beta1`, `beta2`, and `epsilon` can each be a
callable that takes no arguments and returns the actual value to use.
This can be useful for changing these values across different
invocations of optimizer functions. @end_compatibility
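    A minimal usage sketch (the variable and loss are illustrative):
    ```python
    w = tf.compat.v1.get_variable('w', shape=[2])
    loss = tf.reduce_sum(tf.square(w))
    opt = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)
    train_op = opt.minimize(loss)
    ```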
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
self._epsilon_t = None
def _get_beta_accumulators(self):
with ops.init_scope():
if context.executing_eagerly():
graph = None
else:
graph = ops.get_default_graph()
return (self._get_non_slot_variable("beta1_power", graph=graph),
self._get_non_slot_variable("beta2_power", graph=graph))
def _create_slots(self, var_list):
# Create the beta1 and beta2 accumulators on the same device as the first
# variable. Sort the var_list to make sure this device is consistent across
# workers (these need to go on the same PS, otherwise some updates are
# silently ignored).
first_var = min(var_list, key=lambda x: x.name)
self._create_non_slot_variable(
initial_value=self._beta1, name="beta1_power", colocate_with=first_var)
self._create_non_slot_variable(
initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
beta1 = self._call_if_callable(self._beta1)
beta2 = self._call_if_callable(self._beta2)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(beta2, name="beta2")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.apply_adam(
var,
m,
v,
math_ops.cast(beta1_power, var.dtype.base_dtype),
math_ops.cast(beta2_power, var.dtype.base_dtype),
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._beta1_t, var.dtype.base_dtype),
math_ops.cast(self._beta2_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, "m")
v = self.get_slot(var, "v")
beta1_power, beta2_power = self._get_beta_accumulators()
return training_ops.resource_apply_adam(
var.handle,
m.handle,
v.handle,
math_ops.cast(beta1_power, grad.dtype.base_dtype),
math_ops.cast(beta2_power, grad.dtype.base_dtype),
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse_shared(self, grad, var, indices, scatter_add):
beta1_power, beta2_power = self._get_beta_accumulators()
beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
# m_t = beta1 * m + (1 - beta1) * g_t
m = self.get_slot(var, "m")
m_scaled_g_values = grad * (1 - beta1_t)
m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
with ops.control_dependencies([m_t]):
m_t = scatter_add(m, indices, m_scaled_g_values)
# v_t = beta2 * v + (1 - beta2) * (g_t * g_t)
v = self.get_slot(var, "v")
v_scaled_g_values = (grad * grad) * (1 - beta2_t)
v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
with ops.control_dependencies([v_t]):
v_t = scatter_add(v, indices, v_scaled_g_values)
v_sqrt = math_ops.sqrt(v_t)
var_update = state_ops.assign_sub(
var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
return self._apply_sparse_shared(
grad.values,
var,
grad.indices,
lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda
x,
i,
v,
use_locking=self._use_locking))
def _resource_scatter_add(self, x, i, v):
with ops.control_dependencies(
[resource_variable_ops.resource_scatter_add(x.handle, i, v)]):
return x.value()
def _resource_apply_sparse(self, grad, var, indices):
return self._apply_sparse_shared(grad, var, indices,
self._resource_scatter_add)
def _finish(self, update_ops, name_scope):
# Update the power accumulators.
with ops.control_dependencies(update_ops):
beta1_power, beta2_power = self._get_beta_accumulators()
with ops.colocate_with(beta1_power):
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t, use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t, use_locking=self._use_locking)
return control_flow_ops.group(
*update_ops + [update_beta1, update_beta2], name=name_scope)
|
tensorflow-master
|
tensorflow/python/training/adam.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data
how-to](https://tensorflow.org/api_guides/python/reading_data)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
_store_sparse = sparse_ops._add_sparse_to_tensors_map
_store_many_sparse = sparse_ops._add_many_sparse_to_tensors_map
_restore_sparse = sparse_ops._take_many_sparse_from_tensors_map
# pylint: enable=protected-access
@tf_export(
"io.match_filenames_once",
v1=["io.match_filenames_once", "train.match_filenames_once"])
@deprecation.deprecated_endpoints("train.match_filenames_once")
def match_filenames_once(pattern, name=None):
"""Save the list of files matching pattern, so it is only computed once.
NOTE: The order of the files returned can be non-deterministic.
Args:
pattern: A file pattern (glob), or 1D tensor of file patterns.
name: A name for the operations (optional).
Returns:
A variable that is initialized to the list of files matching the pattern(s).
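  A sketch with an illustrative glob pattern; note the returned value is a
  local variable, so it must be initialized before use:
  ```python
  files = tf.io.match_filenames_once('./data/*.tfrecord')
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.local_variables_initializer())
    print(sess.run(files))
  ```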
"""
with ops.name_scope(name, "matching_filenames", [pattern]) as name:
return vs.variable(
name=name, initial_value=io_ops.matching_files(pattern),
trainable=False, validate_shape=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
@tf_export(v1=["train.limit_epochs"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.from_tensors(tensor).repeat(num_epochs)`.")
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Note: creates local counter `epochs`. Use `local_variables_initializer()` to
initialize local variables.
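  For example (a minimal sketch):
  ```python
  limited = tf.compat.v1.train.limit_epochs(tf.constant(42), num_epochs=2)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.local_variables_initializer())
    print(sess.run(limited))  # 42
    print(sess.run(limited))  # 42
    sess.run(limited)  # raises tf.errors.OutOfRangeError
  ```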
Args:
tensor: Any `Tensor`.
num_epochs: A positive integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
Raises:
ValueError: if `num_epochs` is invalid.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
    raise ValueError("num_epochs must be > 0, not %d." % num_epochs)
with ops.name_scope(name, "limit_epochs", [tensor]) as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = vs.variable(
zero64, name="epochs", trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
@tf_export(v1=["train.input_producer"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.from_tensor_slices(input_tensor).shuffle"
"(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
"`shuffle=False`, omit the `.shuffle(...)`.")
def input_producer(input_tensor,
element_shape=None,
num_epochs=None,
shuffle=True,
seed=None,
capacity=32,
shared_name=None,
summary_name=None,
name=None,
cancel_op=None):
"""Output the rows of `input_tensor` to a queue for an input pipeline.
Note: if `num_epochs` is not `None`, this function creates local counter
`epochs`. Use `local_variables_initializer()` to initialize local variables.
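  For example (a minimal sketch of graph-mode usage with queue runners):
  ```python
  rows = tf.constant([[1, 2], [3, 4], [5, 6]])
  queue = tf.compat.v1.train.input_producer(rows, num_epochs=1, shuffle=False)
  row = queue.dequeue()
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.local_variables_initializer())
    coord = tf.compat.v1.train.Coordinator()
    threads = tf.compat.v1.train.start_queue_runners(sess=sess, coord=coord)
    try:
      while True:
        print(sess.run(row))  # [1 2], then [3 4], then [5 6]
    except tf.errors.OutOfRangeError:
      pass
    finally:
      coord.request_stop()
      coord.join(threads)
  ```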
Args:
input_tensor: A tensor with the rows to produce. Must be at least
one-dimensional. Must either have a fully-defined shape, or
`element_shape` must be defined.
element_shape: (Optional.) A `TensorShape` representing the shape of a
row of `input_tensor`, if it cannot be inferred.
num_epochs: (Optional.) An integer. If specified `input_producer` produces
each row of `input_tensor` `num_epochs` times before generating an
`OutOfRange` error. If not specified, `input_producer` can cycle through
the rows of `input_tensor` an unlimited number of times.
shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
within each epoch.
seed: (Optional.) An integer. The seed to use if `shuffle` is true.
capacity: (Optional.) The capacity of the queue to be used for buffering
the input.
shared_name: (Optional.) If set, this queue will be shared under the given
name across multiple sessions.
summary_name: (Optional.) If set, a scalar summary for the current queue
size will be generated, using this name as part of the tag.
    name: (Optional.) A name for the queue.
    cancel_op: (Optional.) Cancel op for the queue.
Returns:
A queue with the output rows. A `QueueRunner` for the queue is
added to the current `QUEUE_RUNNER` collection of the current
graph.
Raises:
ValueError: If the shape of the input cannot be inferred from the arguments.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"Input pipelines based on Queues are not supported when eager execution"
" is enabled. Please use tf.data to ingest data into your model"
" instead.")
with ops.name_scope(name, "input_producer", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
element_shape = input_tensor.shape[1:].merge_with(element_shape)
if not element_shape.is_fully_defined():
raise ValueError("Either `input_tensor` must have a fully defined shape "
"or `element_shape` must be specified")
if shuffle:
input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
input_tensor = limit_epochs(input_tensor, num_epochs)
q = data_flow_ops.FIFOQueue(capacity=capacity,
dtypes=[input_tensor.dtype.base_dtype],
shapes=[element_shape],
shared_name=shared_name, name=name)
enq = q.enqueue_many([input_tensor])
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
q, [enq], cancel_op=cancel_op))
if summary_name is not None:
summary.scalar(summary_name,
math_ops.cast(q.size(), dtypes.float32) * (1. / capacity))
return q
@tf_export(v1=["train.string_input_producer"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.from_tensor_slices(string_tensor).shuffle"
"(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
"`shuffle=False`, omit the `.shuffle(...)`.")
def string_input_producer(string_tensor,
num_epochs=None,
shuffle=True,
seed=None,
capacity=32,
shared_name=None,
name=None,
cancel_op=None):
"""Output strings (e.g. filenames) to a queue for an input pipeline.
Note: if `num_epochs` is not `None`, this function creates local counter
`epochs`. Use `local_variables_initializer()` to initialize local variables.
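  For example (a minimal sketch; the file names are hypothetical):
  ```python
  filename_queue = tf.compat.v1.train.string_input_producer(
      ["file0.csv", "file1.csv"], num_epochs=1, shuffle=False)
  reader = tf.compat.v1.TextLineReader()
  key, value = reader.read(filename_queue)
  ```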
Args:
string_tensor: A 1-D string tensor with the strings to produce.
num_epochs: An integer (optional). If specified, `string_input_producer`
produces each string from `string_tensor` `num_epochs` times before
generating an `OutOfRange` error. If not specified,
`string_input_producer` can cycle through the strings in `string_tensor`
an unlimited number of times.
shuffle: Boolean. If true, the strings are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions. All sessions open to the device which has
this queue will be able to access it via the shared_name. Using this in
a distributed setting means each name will only be seen by one of the
sessions which has access to this operation.
name: A name for the operations (optional).
cancel_op: Cancel op for the queue (optional).
Returns:
A queue with the output strings. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Raises:
    ValueError: If `string_tensor` is an empty Python list. At runtime,
      will fail with an assertion if `string_tensor` becomes an empty tensor.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
not_null_err = "string_input_producer requires a non-null input tensor"
if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
raise ValueError(not_null_err)
with ops.name_scope(name, "input_producer", [string_tensor]) as name:
string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.greater(array_ops.size(string_tensor), 0),
[not_null_err])]):
string_tensor = array_ops.identity(string_tensor)
return input_producer(
input_tensor=string_tensor,
element_shape=[],
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
capacity=capacity,
shared_name=shared_name,
name=name,
summary_name="fraction_of_%d_full" % capacity,
cancel_op=cancel_op)
@tf_export(v1=["train.range_input_producer"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.range(limit).shuffle(limit).repeat(num_epochs)`. If "
"`shuffle=False`, omit the `.shuffle(...)`.")
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Note: if `num_epochs` is not `None`, this function creates local counter
`epochs`. Use `local_variables_initializer()` to initialize local variables.
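  For example (a minimal sketch; values flow once queue runners are started):
  ```python
  index_queue = tf.compat.v1.train.range_input_producer(5, shuffle=False)
  index = index_queue.dequeue()  # yields 0, 1, 2, 3, 4 per epoch
  ```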
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
with ops.name_scope(name, "input_producer", [limit]) as name:
range_tensor = math_ops.range(limit)
return input_producer(
range_tensor, [], num_epochs, shuffle, seed, capacity,
shared_name, "fraction_of_%d_full" % capacity, name)
@tf_export(v1=["train.slice_input_producer"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.from_tensor_slices(tuple(tensor_list)).shuffle"
"(tf.shape(input_tensor, out_type=tf.int64)[0]).repeat(num_epochs)`. If "
"`shuffle=False`, omit the `.shuffle(...)`.")
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
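  For example (a minimal sketch; `images` and `labels` are hypothetical
  tensors sharing the same first dimension):
  ```python
  image, label = tf.compat.v1.train.slice_input_producer(
      [images, labels], num_epochs=1, shuffle=True)
  # `image` and `label` are single-example slices, typically passed on to
  # tf.compat.v1.train.batch().
  ```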
Args:
tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
through the slices an unlimited number of times.
    shuffle: Boolean. If true, the slices are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: A name for the operations (optional).
Returns:
A list of tensors, one for each element of `tensor_list`. If the tensor
in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
tensor will have shape `[a, b, ..., z]`.
Raises:
ValueError: if `slice_input_producer` produces nothing from `tensor_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
with ops.name_scope(name, "input_producer", tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = array_ops.shape(tensor_list[0])[0]
# TODO(josh11b): Add an assertion that the first dimension of
# everything in TensorList matches. Maybe just check the inferred shapes?
queue = range_input_producer(range_size, num_epochs=num_epochs,
shuffle=shuffle, seed=seed, capacity=capacity,
shared_name=shared_name)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
class _SparseMetaData(object):
"""Store information about the Tensor: Is it sparse?, map_op, and rank."""
def __init__(self, sparse, map_op, rank):
"""Create the metadata.
Args:
sparse: Python boolean.
map_op: The `Operation` that created the `SparseTensorsMap` in question.
This Op contains information about the underlying Map object and the
dtype of the original data.
rank: The statically known rank of the `SparseTensor`.
"""
self._sparse = sparse
self._map_op = map_op
self._rank = tensor_shape.Dimension(rank)
def __eq__(self, other):
if self.sparse != other.sparse:
return False
if not self.sparse:
return True
# If map_ops are not the same, the data source is not the same.
if (self.map_op is not None) != (other.map_op is not None):
return False
if self.map_op != other.map_op:
return False
if not self.rank.is_compatible_with(other.rank):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
  def __str__(self):
    map_op_name = self.map_op.name if self.map_op is not None else None
    return "[SparseMetaData(%s, %s, %s)]" % (self.sparse, map_op_name,
                                             self.rank)
def merge_with(self, other):
if self != other:
raise ValueError("SparseMetaData objects are incompatible: %s vs. %s"
% (self, other))
if self.sparse:
self.rank.merge_with(other.rank)
return self
@property
def map_op(self):
return self._map_op
@property
def sparse(self):
return self._sparse
@property
def rank(self):
return self._rank
def _as_tensor_list(tensors):
if isinstance(tensors, dict):
return [tensors[k] for k in sorted(tensors, key=str)]
else:
return tensors
def _as_tensor_list_list(tensors_list):
if not tensors_list:
raise ValueError("Expected at least one set of tensors")
if isinstance(tensors_list[0], dict):
expected_keys = set(tensors_list[0].keys())
for tensors in tensors_list[1:]:
if set(tensors.keys()) != expected_keys:
raise ValueError("All dictionaries in tensors_list must have "
"the same keys")
return [_as_tensor_list(tensors) for tensors in tensors_list]
else:
return tensors_list
def _as_original_type(original_tensors, tensor_list):
if isinstance(original_tensors, dict):
if len(original_tensors) == 1:
# tensor_list is bogusly returned as a single tensor if only one tensor
# was enqueued. Make it a list again. See b/28117485.
tensor_list = [tensor_list]
return {k: tensor_list[i]
for i, k in enumerate(sorted(original_tensors, key=str))}
else:
return tensor_list
def _store_sparse_tensors(tensor_list, enqueue_many, keep_input,
shared_map_ops=None):
"""Store SparseTensors for feeding into batch, etc.
If `shared_map_ops` is provided, the underlying `SparseTensorsMap` objects
are reused (shared). This argument is useful for, e.g., `batch_join`
where multiple enqueue operations write to the same Queue component,
and another (dequeue) thread reads from that same location and must then
restore the associated `SparseTensor` objects. In this case, the sparse
  restore must have a single `SparseTensorsMap` from which to read out the
  handles; so a single `SparseTensorsMap` must be shared for storing
across the multiple enqueue operations. This sharing is performed by
calling `_store_sparse_tensors` the first time with `shared_map_ops=None`,
and then in subsequent times with this value set to the list of `Operation`
objects created in the first call.
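  A sketch of that calling pattern (variable names hypothetical):
  ```python
  stored_0, infos = _store_sparse_tensors(tensors_0, enqueue_many, keep_input)
  stored_1, _ = _store_sparse_tensors(
      tensors_1, enqueue_many, keep_input,
      shared_map_ops=[info.map_op for info in infos])
  ```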
Args:
tensor_list: List of `Tensor` and `SparseTensor` objects.
enqueue_many: Python `Boolean`.
keep_input: Must be a scalar bool Tensor (not a Python bool). If False,
don't store.
shared_map_ops: (optional) List of `Operation` objects from a previous
call to `_store_sparse_tensors`. If not `None`, the op types should be
one of `AddSparseToTensorsMap` or `AddManySparseToTensorsMap` in the
locations corresponding to `SparseTensors` in `tensor_list`.
Returns:
A tuple `(stored_list, sparse_info_list)` where `stored_list` is a list
    of `Tensor` objects (same length as `tensor_list`) and `sparse_info_list`
    is a list of `_SparseMetaData` objects of the same length.
"""
maybe_shared_map_ops = shared_map_ops or [None] * len(tensor_list)
def _sparse_meta_data(t, storing_op, map_op):
if not isinstance(t, sparse_tensor.SparseTensor):
return _SparseMetaData(False, None, None)
rank = t.dense_shape.shape.with_rank(1).dims[0]
if enqueue_many:
rank -= 1
# If a shared map_op was provided, use that. Otherwise use the name of
# the operation used to store the SparseTensor.
return _SparseMetaData(
sparse=True, map_op=map_op or storing_op, rank=rank)
def _maybe_store(t, shared_map_op):
"""Store Sparse tensor, if necessary."""
if not isinstance(t, sparse_tensor.SparseTensor):
return t
map_op_name = shared_map_op.name if shared_map_op else None
def _maybe_store_sparse(t, map_op_name, keep_input):
"""Conditionally store a single sparse Tensor."""
return utils.smart_cond(
keep_input,
lambda: _store_sparse(t, shared_name=map_op_name),
lambda: constant_op.constant(-1, dtypes.int64))
def _maybe_store_many_sparse(t, map_op_name, keep_input):
"""Conditionally store multiple sparse Tensors."""
out_tensor = utils.smart_cond(
keep_input,
lambda: _store_many_sparse(t, shared_name=map_op_name),
lambda: -1 * array_ops.ones(array_ops.shape(t)[0:1], dtypes.int64))
out_tensor.set_shape([None]) # necessary when t.ndims is unknown
return out_tensor
def _sparse_values_to_keep(t, keep_input):
"""Convert a per-row `keep_input` vector to a per-value one."""
# Get the rows of every value in the sparse Tensor.
row_values = t.indices[:, 0]
# The value should be kept iff the row should be kept.
return array_ops.gather(keep_input, row_values)
if keep_input.shape.ndims == 1:
t = sparse_ops.sparse_retain(t, _sparse_values_to_keep(t, keep_input))
store_f = lambda t, name, _: _store_many_sparse(t, shared_name=name)
elif enqueue_many:
store_f = _maybe_store_many_sparse
else:
store_f = _maybe_store_sparse
return store_f(t, map_op_name, keep_input)
stored_list = [
_maybe_store(t, shared_map_op) for t, shared_map_op
in zip(tensor_list, maybe_shared_map_ops)]
  # Since the output of `_store{_many}_sparse` is wrapped in a tf.cond `Merge`,
# we can't just get the Op of the resulting tensor.
def _sparse_op(stored):
for input_tensor in stored.op.inputs:
if input_tensor.op.type in ("AddSparseToTensorsMap",
"AddManySparseToTensorsMap"):
return input_tensor.op
# If there was no sparse input, then the original stored Tensor wasn't
# sparse and we can just return the original Tensor's Op.
return stored.op
sparse_info_list = [
_sparse_meta_data(t, _sparse_op(stored), shared_map_op)
for t, stored, shared_map_op
in zip(tensor_list, stored_list, maybe_shared_map_ops)]
# Expand dims of stored tensors by 1 for proper enqueue shape
stored_list = [
array_ops.expand_dims(s, [-1]) if s_info.sparse else s
for s, s_info in zip(stored_list, sparse_info_list)]
return stored_list, sparse_info_list
def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):
"""Store SparseTensors for feeding into batch_join, etc."""
(s0, sparse_info_list) = _store_sparse_tensors(
tensor_list_list[0], enqueue_many, keep_input)
stored_list_list = [s0]
for tensor_list in tensor_list_list[1:]:
s, sparse_info_candidate = _store_sparse_tensors(
tensor_list, enqueue_many, keep_input,
[st.map_op for st in sparse_info_list])
if sparse_info_list != sparse_info_candidate:
raise ValueError("Inconsistent SparseTensors list: %s vs. %s"
% (tensor_list_list[0], tensor_list))
sparse_info_list = [
info.merge_with(candidate)
for (info, candidate) in zip(sparse_info_list, sparse_info_candidate)]
stored_list_list.append(s)
return (stored_list_list, sparse_info_list)
def _restore_sparse_tensors(stored_list, sparse_info_list):
"""Restore SparseTensors after dequeue in batch, batch_join, etc."""
received_sequence = isinstance(stored_list, collections.Sequence)
if not received_sequence:
stored_list = (stored_list,)
tensors = [
_restore_sparse(sparse_map_op=info.map_op,
sparse_handles=array_ops.squeeze(s, [1]),
rank=tensor_shape.dimension_value(info.rank + 1))
if info.sparse else s
for (s, info) in zip(stored_list, sparse_info_list)]
has_st = any(isinstance(x, sparse_tensor.SparseTensor) for x in tensors)
if has_st:
t_values = [
x.values if isinstance(x, sparse_tensor.SparseTensor)
else x
for x in tensors]
with_deps = lambda x: control_flow_ops.with_dependencies(t_values, x)
ensure_restore_tensors = [
sparse_tensor.SparseTensor(indices=with_deps(x.indices),
values=with_deps(x.values),
dense_shape=with_deps(x.dense_shape))
if isinstance(x, sparse_tensor.SparseTensor)
else with_deps(x)
for x in tensors]
else:
ensure_restore_tensors = tensors
  return (ensure_restore_tensors if received_sequence
          else ensure_restore_tensors[0])
def _validate(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in batch().")
return tensor_list
def _validate_join(tensor_list_list):
tensor_list_list = [ops.convert_n_to_tensor_or_indexed_slices(tl)
for tl in tensor_list_list]
if not tensor_list_list:
raise ValueError("Expected at least one input in batch_join().")
return tensor_list_list
def _validate_keep_input(keep_input, enqueue_many):
"""Validate `keep_input` argument to conditional batching functions."""
keep_input = ops.convert_to_tensor(keep_input)
if keep_input.shape.ndims is None:
raise ValueError(
"`keep_input` dimensions must be known at graph construction.")
if not enqueue_many and keep_input.shape.ndims == 1:
raise ValueError(
"`keep_input` cannot be a vector when `enqueue_many=False`.")
if keep_input.shape.ndims > 1:
    raise ValueError("`keep_input` must have 0 or 1 dimensions.")
return keep_input
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
(", ".join(x.name for x in types),
", ".join(x.name for x in other_types)))
return types
def _merge_shapes(shape_list, enqueue_many):
shape_list = [tensor_shape.as_shape(s) for s in shape_list]
if enqueue_many:
# We want the shapes without the leading batch dimension.
shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
merged_shape = shape_list[0]
for s in shape_list[1:]:
merged_shape.merge_with(s)
return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
"""Calculate and merge the shapes of incoming tensors.
Args:
tensor_list_list: List of tensor lists.
shapes: List of shape tuples corresponding to tensors within the lists.
enqueue_many: Boolean describing whether shapes will be enqueued as
batches or individual entries.
Returns:
A list of shapes aggregating shape inference info from `tensor_list_list`,
or returning `shapes` if it is not `None`.
Raises:
ValueError: If any of the inferred shapes in `tensor_list_list` lack a
well defined rank.
"""
if shapes is None:
len0 = len(tensor_list_list[0])
for tl in tensor_list_list:
for i in xrange(len0):
if tl[i].shape.ndims is None:
raise ValueError("Cannot infer Tensor's rank: %s" % tl[i])
shapes = [_merge_shapes(
[tl[i].shape.as_list() for tl in tensor_list_list], enqueue_many)
for i in xrange(len0)]
return shapes
def _select_which_to_enqueue(tensor_list, keep_input):
"""Select which examples to enqueue based on vector `keep_input`."""
select_i = math_ops.cast(keep_input, dtypes.int32)
tensor_list = [
data_flow_ops.dynamic_partition(x, select_i, num_partitions=2)[1]
for x in tensor_list]
return tensor_list
def _enqueue_join(queue, tensor_list_list, enqueue_many, keep_input):
"""Enqueue `tensor_list_list` in `queue`."""
if enqueue_many:
enqueue_fn = queue.enqueue_many
else:
enqueue_fn = queue.enqueue
if keep_input.shape.ndims == 1:
enqueue_ops = [enqueue_fn(_select_which_to_enqueue(x, keep_input))
for x in tensor_list_list]
else:
enqueue_ops = [utils.smart_cond(
keep_input,
lambda: enqueue_fn(tl), # pylint:disable=cell-var-from-loop
control_flow_ops.no_op) for tl in tensor_list_list]
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many, keep_input):
"""Enqueue `tensor_list` in `queue`."""
if enqueue_many:
enqueue_fn = queue.enqueue_many
else:
enqueue_fn = queue.enqueue
if keep_input.shape.ndims == 1:
enqueue_ops = [
enqueue_fn(_select_which_to_enqueue(tensor_list, keep_input))] * threads
else:
enqueue_ops = [utils.smart_cond(
keep_input,
lambda: enqueue_fn(tensor_list),
control_flow_ops.no_op)] * threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _which_queue(dynamic_pad):
return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
else data_flow_ops.FIFOQueue)
def _batch(tensors, batch_size, keep_input, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Helper function for `batch` and `maybe_batch`."""
if context.executing_eagerly():
raise ValueError(
"Input pipelines based on Queues are not supported when eager execution"
" is enabled. Please use tf.data to ingest data into your model"
" instead.")
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "batch", list(tensor_list) + [keep_input]) as name:
tensor_list = _validate(tensor_list)
keep_input = _validate_keep_input(keep_input, enqueue_many)
(tensor_list, sparse_info) = _store_sparse_tensors(
tensor_list, enqueue_many, keep_input)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = _which_queue(dynamic_pad)(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return _as_original_type(tensors, dequeued)
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def _batch_join(tensors_list, batch_size, keep_input, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Helper function for `batch_join` and `maybe_batch_join`."""
if context.executing_eagerly():
raise ValueError(
"Input pipelines based on Queues are not supported when eager execution"
" is enabled. Please use tf.data to ingest data into your model"
" instead.")
tensor_list_list = _as_tensor_list_list(tensors_list)
with ops.name_scope(name, "batch_join",
_flatten(tensor_list_list) + [keep_input]) as name:
tensor_list_list = _validate_join(tensor_list_list)
keep_input = _validate_keep_input(keep_input, enqueue_many)
tensor_list_list, sparse_info = _store_sparse_tensors_join(
tensor_list_list, enqueue_many, keep_input)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = _which_queue(dynamic_pad)(
capacity=capacity, dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
summary.scalar(
"fraction_of_%d_full" % capacity,
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
# tensors_list was validated to not be empty.
return _as_original_type(tensors_list[0], dequeued)
def _shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
keep_input, num_threads=1, seed=None, enqueue_many=False,
shapes=None, allow_smaller_final_batch=False,
shared_name=None, name=None):
"""Helper function for `shuffle_batch` and `maybe_shuffle_batch`."""
if context.executing_eagerly():
raise ValueError(
"Input pipelines based on Queues are not supported when eager execution"
" is enabled. Please use tf.data to ingest data into your model"
" instead.")
tensor_list = _as_tensor_list(tensors)
with ops.name_scope(name, "shuffle_batch",
list(tensor_list) + [keep_input]) as name:
if capacity <= min_after_dequeue:
raise ValueError("capacity %d must be bigger than min_after_dequeue %d."
% (capacity, min_after_dequeue))
tensor_list = _validate(tensor_list)
keep_input = _validate_keep_input(keep_input, enqueue_many)
tensor_list, sparse_info = _store_sparse_tensors(
tensor_list, enqueue_many, keep_input)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue(queue, tensor_list, num_threads, enqueue_many, keep_input)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"fraction_over_%d_of_%d_full" %
(min_after_dequeue, capacity - min_after_dequeue))
summary.scalar(summary_name, full)
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
return _as_original_type(tensors, dequeued)
def _shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, keep_input, seed=None,
enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Helper function for `shuffle_batch_join` and `maybe_shuffle_batch_join`."""
if context.executing_eagerly():
raise ValueError(
"Input pipelines based on Queues are not supported when eager execution"
" is enabled. Please use tf.data to ingest data into your model"
" instead.")
tensor_list_list = _as_tensor_list_list(tensors_list)
with ops.name_scope(name, "shuffle_batch_join",
_flatten(tensor_list_list) + [keep_input]) as name:
tensor_list_list = _validate_join(tensor_list_list)
keep_input = _validate_keep_input(keep_input, enqueue_many)
tensor_list_list, sparse_info = _store_sparse_tensors_join(
tensor_list_list, enqueue_many, keep_input)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes, shared_name=shared_name)
_enqueue_join(queue, tensor_list_list, enqueue_many, keep_input)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue), dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"fraction_over_%d_of_%d_full" %
(min_after_dequeue, capacity - min_after_dequeue))
summary.scalar(summary_name, full)
if allow_smaller_final_batch:
dequeued = queue.dequeue_up_to(batch_size, name=name)
else:
dequeued = queue.dequeue_many(batch_size, name=name)
dequeued = _restore_sparse_tensors(dequeued, sparse_info)
# tensors_list was validated to not be empty.
return _as_original_type(tensors_list[0], dequeued)
# Batching functions ----------------------------------------------------------
@tf_export(v1=["train.batch"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.batch(batch_size)` (or `padded_batch(...)` if "
"`dynamic_pad=True`).")
def batch(tensors, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Creates batches of tensors in `tensors`.
The argument `tensors` can be a list or a dictionary of tensors.
The value returned by the function will be of the same type
as `tensors`.
This function is implemented using a queue. A `QueueRunner` for the
queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
If `enqueue_many` is `False`, `tensors` is assumed to represent a single
example. An input tensor with shape `[x, y, z]` will be output as a tensor
with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors` is assumed to represent a batch of
examples, where the first dimension is indexed by example, and all members of
`tensors` should have the same size in the first dimension. If an input
tensor has shape `[*, x, y, z]`, the output will have shape `[batch_size, x,
  y, z]`. The `capacity` argument controls how far the prefetching is
  allowed to grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have value `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `shape` property, will have a first `Dimension` value of `None`, and
  operations that depend on a fixed batch_size will fail.
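  For example (a minimal sketch; `image` and `label` are hypothetical
  single-example tensors, e.g. from `slice_input_producer`):
  ```python
  image_batch, label_batch = tf.compat.v1.train.batch(
      [image, label], batch_size=32, num_threads=4, capacity=512)
  ```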
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensors`. The batching will
be nondeterministic if `num_threads > 1`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensors` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same types as `tensors` (except if
the input is a list of one element, then it returns a tensor, not a list).
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _batch(
tensors,
batch_size,
keep_input=True,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.maybe_batch"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.filter(...).batch(batch_size)` (or `padded_batch(...)`"
" if `dynamic_pad=True`).")
def maybe_batch(tensors, keep_input, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Conditionally creates batches of tensors based on `keep_input`.
See docstring in `batch` for more details.
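  For example (a minimal sketch that batches only even values):
  ```python
  value = tf.compat.v1.train.range_input_producer(100, shuffle=False).dequeue()
  keep = tf.equal(value % 2, 0)
  even_batch = tf.compat.v1.train.maybe_batch([value], keep, batch_size=10)
  ```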
Args:
tensors: The list or dictionary of tensors to enqueue.
keep_input: A `bool` Tensor. This tensor controls whether the input is
      added to the queue or not. If it is a scalar and evaluates to `True`,
      then all of `tensors` are added to the queue. If it is a vector and
      `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensors`. The batching will
be nondeterministic if `num_threads > 1`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensors` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
"""
return _batch(
tensors,
batch_size,
keep_input,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).batch(batch_size)` (or "
"`padded_batch(...)` if `dynamic_pad=True`).")
def batch_join(tensors_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, dynamic_pad=False, allow_smaller_final_batch=False,
shared_name=None, name=None):
"""Runs a list of tensors to fill a queue to create batches of examples.
The `tensors_list` argument is a list of tuples of tensors, or a list of
dictionaries of tensors. Each element in the list is treated similarly
to the `tensors` argument of `tf.compat.v1.train.batch()`.
  WARNING: This function is nondeterministic, since it starts a separate thread
  for each element of `tensors_list`, and each thread enqueues its own list of
  tensors.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensors_list)` threads will be started,
with thread `i` enqueuing the tensors from
`tensors_list[i]`. `tensors_list[i1][j]` must match
`tensors_list[i2][j]` in type and shape, except in the first
dimension if `enqueue_many` is true.
If `enqueue_many` is `False`, each `tensors_list[i]` is assumed
to represent a single example. An input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`.
If `enqueue_many` is `True`, `tensors_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensors_list[i]` should have the
same size in the first dimension. The slices of any input tensor
`x` are treated as examples, and the output tensors will have shape
`[batch_size] + x.shape[1:]`.
  The `capacity` argument controls how far the prefetching is allowed to
  grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
*N.B.:* If `dynamic_pad` is `False`, you must ensure that either
(i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors_list` have fully-defined shapes. `ValueError` will be
raised if neither of these conditions holds.
If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
tensors is known, but individual dimensions may have value `None`.
In this case, for each enqueue the dimensions with value `None`
may have a variable length; upon dequeue, the output tensors will be padded
on the right to the maximum shape of the tensors in the current minibatch.
For numbers, this padding takes value 0. For strings, this padding is
the empty string. See `PaddingFIFOQueue` for more info.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `shape` property, will have a first `Dimension` value of `None`, and
  operations that depend on a fixed batch_size will fail.
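  For example (a minimal sketch; `read_example` and `filename_queue` are
  hypothetical):
  ```python
  example_list = [read_example(filename_queue) for _ in range(4)]
  batched = tf.compat.v1.train.batch_join(
      [[e] for e in example_list], batch_size=32, capacity=256)
  ```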
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _batch_join(
tensors_list,
batch_size,
keep_input=True,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.maybe_batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).filter(...).batch(batch_size)` (or "
"`padded_batch(...)` if `dynamic_pad=True`).")
def maybe_batch_join(tensors_list, keep_input, batch_size, capacity=32,
enqueue_many=False, shapes=None, dynamic_pad=False,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Runs a list of tensors to conditionally fill a queue to create batches.
See docstring in `batch_join` for more details.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
keep_input: A `bool` Tensor. This tensor controls whether the input is
      added to the queue or not. If it is a scalar and evaluates to `True`,
      then all of `tensors` are added to the queue. If it is a vector and
      `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
dynamic_pad: Boolean. Allow variable dimensions in input shapes.
The given dimensions are padded upon dequeue so that tensors within a
batch have the same shapes.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
return _batch_join(
tensors_list,
batch_size,
keep_input,
capacity=capacity,
enqueue_many=enqueue_many,
shapes=shapes,
dynamic_pad=dynamic_pad,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.shuffle_batch"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.shuffle(min_after_dequeue).batch(batch_size)`.")
def shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None, name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensors` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensors`.
If `enqueue_many` is `False`, `tensors` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensors` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how far the prefetching is allowed to
  grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.compat.v1.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
  passed, or (ii) all of the tensors in `tensors` have
  fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `shape` property, will have a first `Dimension` value of `None`, and
  operations that depend on a fixed batch_size will fail.
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
    A list or dictionary of tensors with the same types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch(
tensors,
batch_size,
capacity,
min_after_dequeue,
keep_input=True,
num_threads=num_threads,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.maybe_shuffle_batch"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.filter(...).shuffle(min_after_dequeue).batch(batch_size)`"
".")
def maybe_shuffle_batch(tensors, batch_size, capacity, min_after_dequeue,
keep_input, num_threads=1, seed=None,
enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Creates batches by randomly shuffling conditionally-enqueued tensors.
See docstring in `shuffle_batch` for more details.
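  For example (a minimal sketch; `example` and the predicate `keep` are
  hypothetical):
  ```python
  batch = tf.compat.v1.train.maybe_shuffle_batch(
      [example], batch_size=32, capacity=5000, min_after_dequeue=1000,
      keep_input=keep)
  ```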
Args:
tensors: The list or dictionary of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
keep_input: A `bool` Tensor. This tensor controls whether the input is
      added to the queue or not. If it is a scalar and evaluates to `True`,
      then all of `tensors` are added to the queue. If it is a vector and
      `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
    A list or dictionary of tensors with the same types as `tensors`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch(
tensors,
batch_size,
capacity,
min_after_dequeue,
keep_input,
num_threads=num_threads,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.shuffle_batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).shuffle(min_after_dequeue).batch"
"(batch_size)`.")
def shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, allow_smaller_final_batch=False,
shared_name=None, name=None):
"""Create batches by randomly shuffling tensors.
The `tensors_list` argument is a list of tuples of tensors, or a list of
dictionaries of tensors. Each element in the list is treated similarly
to the `tensors` argument of `tf.compat.v1.train.shuffle_batch()`.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensors_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensors_list`.
`len(tensors_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensors_list[i]`. `tensors_list[i1][j]` must match
`tensors_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensors_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y, z]`
will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensors_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensors_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how far the prefetching is allowed to
  grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
If `allow_smaller_final_batch` is `True`, a smaller batch value than
`batch_size` is returned when the queue is closed and there are not enough
elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `shape` property, will have a first `Dimension` value of `None`, and
  operations that depend on a fixed batch_size will fail.
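  For example (a minimal sketch; `example_list_0` and `example_list_1` come
  from two hypothetical readers):
  ```python
  batch = tf.compat.v1.train.shuffle_batch_join(
      [example_list_0, example_list_1], batch_size=32,
      capacity=5000, min_after_dequeue=1000)
  ```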
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors_list[i]`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch_join(
tensors_list,
batch_size,
capacity,
min_after_dequeue,
keep_input=True,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
@tf_export(v1=["train.maybe_shuffle_batch_join"])
@deprecation.deprecated(
None, "Queue-based input pipelines have been replaced by `tf.data`. Use "
"`tf.data.Dataset.interleave(...).filter(...).shuffle(min_after_dequeue)"
".batch(batch_size)`.")
def maybe_shuffle_batch_join(tensors_list, batch_size, capacity,
min_after_dequeue, keep_input, seed=None,
enqueue_many=False, shapes=None,
allow_smaller_final_batch=False, shared_name=None,
name=None):
"""Create batches by randomly shuffling conditionally-enqueued tensors.
See docstring in `shuffle_batch_join` for more details.
Args:
tensors_list: A list of tuples or dictionaries of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
keep_input: A `bool` Tensor. This tensor controls whether the input is
      added to the queue or not. If it is a scalar and evaluates to `True`,
      then all of `tensors` are added to the queue. If it is a vector and
      `enqueue_many`
is `True`, then each example is added to the queue only if the
corresponding value in `keep_input` is `True`. This tensor essentially
acts as a filtering mechanism.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensors_list[i]`.
allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
batch to be smaller if there are insufficient items left in the queue.
    shared_name: (Optional) If set, this queue will be shared under the given
name across multiple sessions.
name: (Optional) A name for the operations.
Returns:
A list or dictionary of tensors with the same number and types as
`tensors_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensors_list`.
@compatibility(eager)
Input pipelines based on Queues are not supported when eager execution is
enabled. Please use the `tf.data` API to ingest data under eager execution.
@end_compatibility
"""
return _shuffle_batch_join(
tensors_list,
batch_size,
capacity,
min_after_dequeue,
keep_input,
seed=seed,
enqueue_many=enqueue_many,
shapes=shapes,
allow_smaller_final_batch=allow_smaller_final_batch,
shared_name=shared_name,
name=name)
|
tensorflow-master
|
tensorflow/python/training/input.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for QueueRunner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import time
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
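# Minimal stand-in for an op: `QueueRunner` only needs a `name` attribute to
# create its threads, and running this fake op in a session fails with an
# "Operation not in the graph" error, which several tests below assert on.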
_MockOp = collections.namedtuple("MockOp", ["name"])
@test_util.run_v1_only("QueueRunner removed from v2")
class QueueRunnerTest(test.TestCase):
def testBasic(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = qr.create_threads(sess)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0"])
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testTwoOps(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var0 = variables.VariableV1(zero64)
count_up_to_3 = var0.count_up_to(3)
var1 = variables.VariableV1(zero64)
count_up_to_30 = var1.count_up_to(30)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [count_up_to_3, count_up_to_30])
threads = qr.create_threads(sess)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-CountUpTo_1:0"])
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
self.assertEqual(3, self.evaluate(var0))
self.assertEqual(30, self.evaluate(var1))
def testExceptionsCaptured(self):
with self.cached_session() as sess:
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [_MockOp("i fail"),
_MockOp("so fail")])
threads = qr.create_threads(sess)
self.evaluate(variables.global_variables_initializer())
for t in threads:
t.start()
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(2, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
self.assertTrue("Operation not in the graph" in str(exceptions[1]))
def testRealDequeueEnqueue(self):
with self.cached_session() as sess:
q0 = data_flow_ops.FIFOQueue(3, dtypes.float32)
enqueue0 = q0.enqueue((10.0,))
close0 = q0.close()
q1 = data_flow_ops.FIFOQueue(30, dtypes.float32)
enqueue1 = q1.enqueue((q0.dequeue(),))
dequeue1 = q1.dequeue()
qr = queue_runner_impl.QueueRunner(q1, [enqueue1])
threads = qr.create_threads(sess)
for t in threads:
t.start()
# Enqueue 2 values, then close queue0.
enqueue0.run()
enqueue0.run()
close0.run()
# Wait for the queue runner to terminate.
for t in threads:
t.join()
# It should have terminated cleanly.
self.assertEqual(0, len(qr.exceptions_raised))
# The 2 values should be in queue1.
self.assertEqual(10.0, self.evaluate(dequeue1))
self.assertEqual(10.0, self.evaluate(dequeue1))
# And queue1 should now be closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError, "is closed"):
self.evaluate(dequeue1)
def testRespectCoordShouldStop(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
      # Ask the coordinator to stop. The queue runner should
# finish immediately.
coord = coordinator.Coordinator()
coord.request_stop()
threads = qr.create_threads(sess, coord)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-close_on_stop"])
for t in threads:
t.start()
coord.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 0.
self.assertEqual(0, self.evaluate(var))
def testRequestStopOnException(self):
with self.cached_session() as sess:
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
qr = queue_runner_impl.QueueRunner(queue, [_MockOp("not an op")])
coord = coordinator.Coordinator()
threads = qr.create_threads(sess, coord)
for t in threads:
t.start()
# The exception should be re-raised when joining.
with self.assertRaisesRegexp(ValueError, "Operation not in the graph"):
coord.join()
def testGracePeriod(self):
with self.cached_session() as sess:
# The enqueue will quickly block.
queue = data_flow_ops.FIFOQueue(2, dtypes.float32)
enqueue = queue.enqueue((10.0,))
dequeue = queue.dequeue()
qr = queue_runner_impl.QueueRunner(queue, [enqueue])
coord = coordinator.Coordinator()
qr.create_threads(sess, coord, start=True)
# Dequeue one element and then request stop.
dequeue.op.run()
time.sleep(0.02)
coord.request_stop()
      # We should be able to join because request_stop() will cause
      # the queue to be closed and the enqueue to terminate.
coord.join(stop_grace_period_secs=1.0)
def testMultipleSessions(self):
with self.cached_session() as sess:
with session.Session() as other_sess:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
# NOTE that this test does not actually start the threads.
threads = qr.create_threads(sess, coord=coord)
other_threads = qr.create_threads(other_sess, coord=coord)
self.assertEqual(len(threads), len(other_threads))
def testIgnoreMultiStarts(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
coord = coordinator.Coordinator()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
threads = []
# NOTE that this test does not actually start the threads.
threads.extend(qr.create_threads(sess, coord=coord))
new_threads = qr.create_threads(sess, coord=coord)
self.assertEqual([], new_threads)
def testThreads(self):
with self.cached_session() as sess:
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
self.evaluate(variables.global_variables_initializer())
qr = queue_runner_impl.QueueRunner(queue, [count_up_to,
_MockOp("bad_op")])
threads = qr.create_threads(sess, start=True)
self.assertEqual(sorted(t.name for t in threads),
["QueueRunnerThread-fifo_queue-CountUpTo:0",
"QueueRunnerThread-fifo_queue-bad_op"])
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
threads = qr.create_threads(sess, start=True)
for t in threads:
t.join()
exceptions = qr.exceptions_raised
self.assertEqual(1, len(exceptions))
self.assertTrue("Operation not in the graph" in str(exceptions[0]))
def testName(self):
with ops.name_scope("scope"):
queue = data_flow_ops.FIFOQueue(10, dtypes.float32, name="queue")
qr = queue_runner_impl.QueueRunner(queue, [control_flow_ops.no_op()])
self.assertEqual("scope/queue", qr.name)
queue_runner_impl.add_queue_runner(qr)
self.assertEqual(
1, len(ops.get_collection(ops.GraphKeys.QUEUE_RUNNERS, "scope")))
def testStartQueueRunners(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session() as sess:
init_op.run()
threads = queue_runner_impl.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testStartQueueRunnersRaisesIfNotASession(self):
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session():
init_op.run()
with self.assertRaisesRegexp(TypeError, "tf.Session"):
queue_runner_impl.start_queue_runners("NotASession")
def testStartQueueRunnersIgnoresMonitoredSession(self):
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.cached_session():
init_op.run()
threads = queue_runner_impl.start_queue_runners(
monitored_session.MonitoredSession())
self.assertFalse(threads)
def testStartQueueRunnersNonDefaultGraph(self):
# CountUpTo will raise OUT_OF_RANGE when it reaches the count.
graph = ops.Graph()
with graph.as_default():
zero64 = constant_op.constant(0, dtype=dtypes.int64)
var = variables.VariableV1(zero64)
count_up_to = var.count_up_to(3)
queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
init_op = variables.global_variables_initializer()
qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
queue_runner_impl.add_queue_runner(qr)
with self.session(graph=graph) as sess:
init_op.run()
threads = queue_runner_impl.start_queue_runners(sess)
for t in threads:
t.join()
self.assertEqual(0, len(qr.exceptions_raised))
# The variable should be 3.
self.assertEqual(3, self.evaluate(var))
def testQueueRunnerSerializationRoundTrip(self):
graph = ops.Graph()
with graph.as_default():
queue = data_flow_ops.FIFOQueue(10, dtypes.float32, name="queue")
enqueue_op = control_flow_ops.no_op(name="enqueue")
close_op = control_flow_ops.no_op(name="close")
cancel_op = control_flow_ops.no_op(name="cancel")
qr0 = queue_runner_impl.QueueRunner(
queue, [enqueue_op],
close_op,
cancel_op,
queue_closed_exception_types=(errors_impl.OutOfRangeError,
errors_impl.CancelledError))
qr0_proto = queue_runner_impl.QueueRunner.to_proto(qr0)
qr0_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
self.assertEqual("queue", qr0_recon.queue.name)
self.assertEqual(1, len(qr0_recon.enqueue_ops))
self.assertEqual(enqueue_op, qr0_recon.enqueue_ops[0])
self.assertEqual(close_op, qr0_recon.close_op)
self.assertEqual(cancel_op, qr0_recon.cancel_op)
self.assertEqual(
(errors_impl.OutOfRangeError, errors_impl.CancelledError),
qr0_recon.queue_closed_exception_types)
# Assert we reconstruct an OutOfRangeError for QueueRunners
# created before QueueRunnerDef had a queue_closed_exception_types field.
del qr0_proto.queue_closed_exception_types[:]
qr0_legacy_recon = queue_runner_impl.QueueRunner.from_proto(qr0_proto)
self.assertEqual("queue", qr0_legacy_recon.queue.name)
self.assertEqual(1, len(qr0_legacy_recon.enqueue_ops))
self.assertEqual(enqueue_op, qr0_legacy_recon.enqueue_ops[0])
self.assertEqual(close_op, qr0_legacy_recon.close_op)
self.assertEqual(cancel_op, qr0_legacy_recon.cancel_op)
self.assertEqual((errors_impl.OutOfRangeError,),
qr0_legacy_recon.queue_closed_exception_types)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/queue_runner_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adadelta Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adadelta
class AdadeltaOptimizerTest(test.TestCase):
def doTestBasic(self, use_resource=False, use_callable_params=False):
num_updates = 4 # number of ADADELTA steps to perform
for dtype in [dtypes.half, dtypes.float32]:
for grad in [0.2, 0.1, 0.01]:
for lr in [1.0, 0.5, 0.1]:
var0_init = [1.0, 2.0]
var1_init = [3.0, 4.0]
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_init, dtype=dtype)
var1 = resource_variable_ops.ResourceVariable(
var1_init, dtype=dtype)
else:
var0 = variables.Variable(var0_init, dtype=dtype)
var1 = variables.Variable(var1_init, dtype=dtype)
grads = constant_op.constant([grad, grad], dtype=dtype)
accum = 0.0
accum_update = 0.0
# ADADELTA gradient optimizer
rho = 0.95
epsilon = 1e-8
if use_callable_params:
adadelta_opt = adadelta.AdadeltaOptimizer(
learning_rate=lambda: lr, # pylint: disable=cell-var-from-loop
rho=lambda: rho, # pylint: disable=cell-var-from-loop
epsilon=lambda: epsilon) # pylint: disable=cell-var-from-loop
else:
adadelta_opt = adadelta.AdadeltaOptimizer(
learning_rate=lr, rho=rho, epsilon=epsilon)
if not context.executing_eagerly():
adadelta_update = adadelta_opt.apply_gradients(
zip([grads, grads], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# TODO(lxuechen): This is hard to test in eager mode,
# since the optimizer is not fully initialized until the first
# call to `apply_gradients`
opt_vars = adadelta_opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var0._shared_name)
self.assertStartsWith(opt_vars[2].name, var1._shared_name)
self.assertStartsWith(opt_vars[3].name, var1._shared_name)
self.assertEqual(4, len(opt_vars))
# Assign slots
slot = [None] * 2
slot_update = [None] * 2
self.assertEqual(["accum", "accum_update"],
adadelta_opt.get_slot_names())
slot[0] = adadelta_opt.get_slot(var0, "accum")
          self.assertEqual(slot[0].get_shape(), var0.get_shape())
self.assertFalse(slot[0] in variables.trainable_variables())
slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
          self.assertEqual(slot_update[0].get_shape(), var0.get_shape())
self.assertFalse(slot_update[0] in variables.trainable_variables())
slot[1] = adadelta_opt.get_slot(var1, "accum")
          self.assertEqual(slot[1].get_shape(), var1.get_shape())
self.assertFalse(slot[1] in variables.trainable_variables())
slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
          self.assertEqual(slot_update[1].get_shape(), var1.get_shape())
self.assertFalse(slot_update[1] in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose(var0_init, self.evaluate(var0))
self.assertAllClose(var1_init, self.evaluate(var1))
update = [None] * num_updates
tot_update = 0
for step in range(num_updates):
# Run adadelta update for comparison
if not context.executing_eagerly():
self.evaluate(adadelta_update)
else:
adadelta_opt.apply_gradients(zip([grads, grads], [var0, var1]))
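            # Reference Adadelta recurrences, computed in numpy:
            #   accum        <- rho * accum + (1 - rho) * grad**2
            #   update       <- sqrt(accum_update + epsilon)
            #                   / sqrt(accum + epsilon) * grad
            #   accum_update <- rho * accum_update + (1 - rho) * update**2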
            # On the first step, both accumulators start from zero.
accum = accum * rho + (grad**2) * (1 - rho)
update[step] = (
np.sqrt(accum_update + epsilon) *
(1. / np.sqrt(accum + epsilon)) * grad)
accum_update = (
accum_update * rho + (update[step]**2) * (1.0 - rho))
tot_update += update[step] * lr
if not context.executing_eagerly():
# Check that the accumulators have been updated
# TODO(lxuechen): This is hard to test in eager mode
for slot_idx in range(2):
self.assertAllCloseAccordingToType(
np.array([accum, accum], dtype=dtype.as_numpy_dtype()),
self.evaluate(slot[slot_idx]),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array(
[accum_update, accum_update],
dtype=dtype.as_numpy_dtype()),
self.evaluate(slot_update[slot_idx]),
rtol=1e-5)
# Check that the parameters have been updated
self.assertAllCloseAccordingToType(
np.array(
[var0_init[0] - tot_update, var0_init[1] - tot_update],
dtype=dtype.as_numpy_dtype()),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array(
[var1_init[0] - tot_update, var1_init[1] - tot_update],
dtype=dtype.as_numpy_dtype()),
self.evaluate(var1),
rtol=1e-5)
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = adadelta.AdadeltaOptimizer(
1.0, 1.0, 1.0).minimize(loss)
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/adadelta_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for generating and loading vocab remappings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_checkpoint_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
ops.NotDifferentiable("GenerateVocabRemapping")
ops.NotDifferentiable("LoadAndRemapMatrix")
def _load_and_remap_matrix(ckpt_path,
old_tensor_name,
new_row_vocab_offset,
num_rows_to_load,
new_col_vocab_size,
initializer,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
max_rows_in_memory=-1):
"""Loads a 2-D (matrix) `Tensor` from checkpoint.
Generates 1D-remappings for rows and columns using the
`GenerateVocabRemapping` op, and initializes any anticipated values with the
provided initializer. Then, uses the `LoadAndRemapMatrix` op to create a
matrix that loads existing values from the checkpoint, while filling out
"missing" values with the newly initialized values. See
contrib/framework/ops/checkpoint_ops.cc for more information on the wrapped
functionality (LoadAndRemapMatrix). This wrapper can be used to perform only
row remapping or only col remapping. If only row remapping is desired,
{new,old}_col_vocab_file should be `None`, and vice versa for column
remapping.
NOTE: This only supports div-partitioning the vocabulary on the 1st dimension
(row axis) via `new_row_vocab_offset`.
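  For example, a minimal sketch of a row-only remap (the paths below are
  hypothetical placeholders):
  ```python
  remapped = _load_and_remap_matrix(
      ckpt_path="/tmp/model.ckpt-0",
      old_tensor_name="embeddings",
      new_row_vocab_offset=0,
      num_rows_to_load=100,
      new_col_vocab_size=16,
      initializer=init_ops.zeros_initializer(),
      old_row_vocab_file="/tmp/old_vocab.txt",
      new_row_vocab_file="/tmp/new_vocab.txt")
  ```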
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_offset: A 0-indexed integer representing what line to
start reading at in the new row vocabulary. Used for partitioned
variables.
num_rows_to_load: Number of rows to load for the new vocabulary (note: to
support variable partitioning and partial loading, this does not need to
be the same as the number of entries in `new_row_vocab_file`).
new_col_vocab_size: Number of columns to load - should be the same as the
number of entries in `new_col_vocab_file`, since we don't support
partitioning along the column axis.
initializer: Callable initializer function that accepts a 1-D tensor as the
arg to specify the shape of the returned tensor. Used to initialize
missing values.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis - in which case, `new_row_vocab_offset` and
`num_rows_to_load` work under the assumption that the new row vocab is the
same as the old row vocab.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis - in which case, `new_col_vocab_size` works
under the assumption that the new col vocab is the same as the old col
vocab.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A Tensor of shape `[num_rows_to_load + num_row_oov_buckets,
new_col_vocab_size + num_col_oov_buckets]`, with values loaded from the
specified tensor in the checkpoint, and any missing or OOV values
initialized with the given `initializer`.
Raises:
ValueError: If `num_row_oov_buckets` or `num_col_oov_buckets` < 0.
ValueError: If either `old_row_vocab_file` or `new_row_vocab_file` is
provided, while the other is not. Same for `old_col_vocab_file` and
`new_col_vocab_file`.
    ValueError: If neither row vocabs nor col vocabs are provided.
"""
if num_row_oov_buckets < 0:
raise ValueError("num_row_oov_buckets must be >= 0, but received %d" %
num_row_oov_buckets)
if num_col_oov_buckets < 0:
raise ValueError("num_col_oov_buckets must be >= 0, but received %d" %
num_col_oov_buckets)
if bool(old_row_vocab_file) != bool(new_row_vocab_file):
raise ValueError(
"old_row_vocab_file and new_row_vocab_file must both be specified or "
"left unspecified. old_row_vocab_file='{}', new_row_vocab_file='{}'".
format(old_row_vocab_file, new_row_vocab_file))
if bool(old_col_vocab_file) != bool(new_col_vocab_file):
raise ValueError(
"old_col_vocab_file and new_col_vocab_file must both be specified or "
"left unspecified. old_col_vocab_file='{}', new_col_vocab_file='{}'".
format(old_col_vocab_file, new_col_vocab_file))
remap_rows = new_row_vocab_file and old_row_vocab_file
remap_cols = new_col_vocab_file and old_col_vocab_file
if not (remap_rows or remap_cols):
raise ValueError(
"Must provide either row or column vocab files. If no remapping is "
"necessary, consider using `tf.contrib.framework.init_from_checkpoint` "
"instead.")
num_rows_present = num_rows_to_load
if remap_rows:
row_remapping, num_rows_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_row_vocab_file,
old_vocab_file=old_row_vocab_file,
new_vocab_offset=new_row_vocab_offset,
num_new_vocab=num_rows_to_load,
old_vocab_size=old_row_vocab_size))
else:
# Even when the rows are not being reordered, we still need to generate a
# remapping to account for initializing partitioned Variables (when
# new_row_vocab_offset is non-zero).
row_remapping = math_ops.range(
new_row_vocab_offset,
new_row_vocab_offset + num_rows_to_load,
dtype=dtypes.int64)
col_remapping = []
num_cols_present = new_col_vocab_size
if remap_cols:
col_remapping, num_cols_present = (
gen_checkpoint_ops.generate_vocab_remapping(
new_vocab_file=new_col_vocab_file,
old_vocab_file=old_col_vocab_file,
new_vocab_offset=0, # Offset is unused for cols (no partitioning).
num_new_vocab=new_col_vocab_size))
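  # Entries of the loaded block that are not found in the checkpoint are
  # "missing": total entries minus present entries. Their fresh values are
  # generated up front as a [num_missing, 1] tensor.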
init_vals = initializer([
num_rows_to_load * new_col_vocab_size -
num_rows_present * num_cols_present, 1
])
return_tensor = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
row_remapping=row_remapping,
col_remapping=col_remapping,
initializing_values=init_vals,
num_rows=num_rows_to_load,
num_cols=new_col_vocab_size,
max_rows_in_memory=max_rows_in_memory)
# Add OOV row(s) and column(s).
if num_row_oov_buckets > 0:
init_row_oov_val = initializer([num_row_oov_buckets, new_col_vocab_size])
init_row_oov_val = ops.convert_to_tensor(init_row_oov_val)
return_tensor = array_ops.concat([return_tensor, init_row_oov_val], 0)
if num_col_oov_buckets > 0:
    # The column OOV block must also span any row OOV buckets appended above.
init_col_oov_val = initializer(
[num_rows_to_load + num_row_oov_buckets, num_col_oov_buckets])
init_col_oov_val = ops.convert_to_tensor(init_col_oov_val)
return_tensor = array_ops.concat([return_tensor, init_col_oov_val], 1)
return return_tensor
def _load_and_remap_matrix_initializer(ckpt_path,
old_tensor_name,
new_row_vocab_size,
new_col_vocab_size,
old_row_vocab_size=-1,
old_row_vocab_file=None,
new_row_vocab_file=None,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=0,
num_col_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
r"""Returns a var initializer for loading and remapping a 2-D (matrix) tensor.
The returned initializer loads a 2-D (matrix) `Tensor` with name
`old_tensor_name` from the checkpoint at `ckpt_path`. It will reorder the
rows/columns according to the specified vocab files and append additional
out-of-vocabulary rows/columns according to the number of OOV buckets.
The format of the file at the `{old,new}_{row,col}_vocab_file` path should be
a text file, with each line containing a single entity within the vocabulary.
Let the function `line_of(f, "x")` return the 0-indexed line number of the
entity "x" in file f, and the function `entity_at(f, i)` return the entity at
line i of file f. Then, row i of the new output matrix will be taken from row
`line_of(old_row_vocab_file, entity_at(new_row_vocab_file, i))` of the old
matrix. If any entity in `new_row_vocab_file` is not found in
`old_row_vocab_file`, that row is considered a "missing" row, and its values
will be initialized using the `initializer` arg. The same logic also applies
for the columns.
For example, assuming that:
* `old_row_vocab_file` contains "mercury\nvenus\nmars"
* `new_row_vocab_file` contains "venus\njupiter\nmercury"
* `old_col_vocab_file` contains "good\nbetter\nbest"
* `new_col_vocab_file` contains "good\nbest\nfantastic"
* `initializer` returns the natural numbers `[1, 2, 3, 4, ...]`
* `w(i, j)` represents the value from row i, column j of the old matrix
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1],
[2, 3, 4],
[w(0, 0), w(0, 2), 5]]`
If we further specify that:
* `num_row_oov_buckets` == 2
* `num_col_oov_buckets` == 1
Then the new output matrix will look like:
`[[w(1, 0), w(1, 2), 1, 12],
[2, 3, 4, 13],
[w(0, 0), w(0, 2), 5, 14],
[6, 7, 8, 15],
[9, 10, 11, 16]]`
If `{old,new}_row_vocab_file` are None, we assume that the old and new row
vocab files are the same, and no row remapping is done. If
`{old,new}_col_vocab_file` are None, we assume that the old and new column
vocab files are the same, and no column remapping is done.
The returned initializer only supports div-partitioning along the row axis. It
does not support partitioning along the column axis (as this is not common in
practice) or mod-partitioning.
NOTE: When this is used to warm-start variables, client code should use
`tf.lookup.index_table_from_tensor()` like
contrib/layers/python/layers/feature_column.py does, as opposed to
`tf.feature_to_id()` - in order to ensure the underlying lookup tables are the
same.
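  For example, a minimal sketch wiring the initializer into a variable (the
  paths and sizes below are hypothetical placeholders):
  ```python
  init = _load_and_remap_matrix_initializer(
      ckpt_path="/tmp/model.ckpt-0",
      old_tensor_name="output_layer",
      new_row_vocab_size=5,
      new_col_vocab_size=4,
      old_row_vocab_file="/tmp/old_row_vocab.txt",
      new_row_vocab_file="/tmp/new_row_vocab.txt",
      num_row_oov_buckets=2)
  var = tf.get_variable("output_layer", shape=[5 + 2, 4], initializer=init)
  ```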
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_row_vocab_size: `int` specifying the number of entries in
`new_row_vocab_file`. If no row remapping is needed (no row vocab
provided), this should be equal to the number of rows to load from the old
matrix (which can theoretically be smaller than the number of rows in the
old matrix).
new_col_vocab_size: `int` specifying the number of entries in
`new_col_vocab_file`. If no column remapping is needed (no column vocab
provided), this should be equal to the number of columns in the old
matrix.
old_row_vocab_size: The number of entries to consider in the old vocabulary.
With the default value of -1, the entire old row vocabulary file will be
used. Otherwise, only the first `old_row_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
`old_row_vocab_file`. NOTE: we do not provide an equivalent
`old_col_vocab_size` for classes.
old_row_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old row vocabulary file. Can be None, which represents no
remapping on the row axis.
new_row_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new row vocabulary file. Can be None, which represents no remapping
on the row axis.
old_col_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old column vocabulary file. Can be None, which represents no
remapping on the column axis.
new_col_vocab_file: A scalar `Tensor` of type `string` containing the path
to the new column vocabulary file. Can be None, which represents no
remapping on the column axis.
num_row_oov_buckets: `int` specifying the number of out-of-vocabulary rows
to append. Must be >= 0.
num_col_oov_buckets: `int` specifying the number of out-of-vocabulary
columns to append. Must be >= 0.
initializer: Initializer function to initialize missing values. Accepts a
1-D tensor as the arg to specify the shape of the returned tensor. If
`None`, defaults to using `zeros_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function that should be used to initialize a
(potentially partitioned) `Variable` whose complete shape is
`[new_row_vocab_size + num_row_oov_buckets, new_col_vocab_size +
num_col_oov_buckets]`.
Raises:
TypeError: If `initializer` is specified but not callable.
"""
if initializer is None:
# TODO(b/25671353): Consider using sqrt(6/(fan_in + fan_out)) instead, from
# Glorot and Bengio, 2010.
initializer = init_ops.zeros_initializer()
if not callable(initializer):
raise TypeError(
"initializer must be callable, instead of being {} of type {}.".format(
initializer, type(initializer)))
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
"""Variable initializer.
Args:
shape: Shape of `Tensor` to return. Should include OOV on both axes.
dtype: Must be float32.
partition_info: variable_scope._PartitionInfo.
Returns:
`Tensor` of shape `shape`.
Raises:
TypeError: If `dtype` is anything other than float32.
ValueError: For shape mismatch upon invocation.
"""
# Sanity checks.
if dtype != dtypes.float32:
raise TypeError(
"Currently, only float32 is supported. Received dtype: {}".format(
dtype))
if len(shape) != 2:
raise ValueError("Expected 2-dim shape, but received: {}".format(shape))
if shape[0] <= 0:
raise ValueError(
"Expected 1st dim of shape to be > 0, but received shape: {}".format(
shape))
if shape[1] != (new_col_vocab_size + num_col_oov_buckets):
raise ValueError(
"Expected 2nd dim of shape to be new_col_vocab_size ({}) + "
"num_col_oov_buckets ({}) = {}, but received shape: {}".format(
new_col_vocab_size, num_col_oov_buckets,
new_col_vocab_size + num_col_oov_buckets, shape))
offset = 0
if partition_info is not None:
offset = partition_info.single_offset(shape)
if offset + shape[0] > new_row_vocab_size + num_row_oov_buckets:
raise ValueError(
"Trying to initialize {} additional rows after {} rows have already "
"been initialized, which would exceed expected total row count of "
"new_row_vocab_size ({}) + num_row_oov_buckets ({}) = {}.".format(
shape[0], offset, new_row_vocab_size, num_row_oov_buckets,
new_row_vocab_size + num_row_oov_buckets))
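    # Rows of this partition that extend past `new_row_vocab_size` fall into
    # the OOV buckets and must be freshly initialized rather than loaded
    # from the checkpoint.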
row_oov_buckets_to_use = min(shape[0],
max(0, offset + shape[0] - new_row_vocab_size))
num_rows_to_load = shape[0] - row_oov_buckets_to_use
# We may be operating on an OOV-only partition, in which case we newly
# initialize all rows of this partition.
if offset > new_row_vocab_size:
if shape[0] != row_oov_buckets_to_use:
raise ValueError(
"Partitioned variable offset is greater than new vocab size and "
"not operating on OOV-only partition.")
return initializer(shape)
return _load_and_remap_matrix(
ckpt_path=ckpt_path,
old_tensor_name=old_tensor_name,
new_row_vocab_offset=offset,
num_rows_to_load=num_rows_to_load,
new_col_vocab_size=new_col_vocab_size,
initializer=initializer,
old_row_vocab_size=old_row_vocab_size,
old_row_vocab_file=old_row_vocab_file,
new_row_vocab_file=new_row_vocab_file,
old_col_vocab_file=old_col_vocab_file,
new_col_vocab_file=new_col_vocab_file,
num_row_oov_buckets=row_oov_buckets_to_use,
num_col_oov_buckets=num_col_oov_buckets,
max_rows_in_memory=max_rows_in_memory)
return _initializer
def _load_embedding_initializer(ckpt_path,
embedding_tensor_name,
new_vocab_size,
embedding_dim,
old_vocab_file,
new_vocab_file,
old_vocab_size=-1,
num_oov_buckets=0,
initializer=None,
max_rows_in_memory=-1):
"""Returns a variable initializer for loading pre-trained embeddings.
Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
embedding weights and remapping according to the provided vocab files. See
docs for `load_and_remap_matrix_initializer()` for more details.
NOTE: Only for use with div-partitioned variables / vocabularies.
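  For example, a minimal sketch (the paths and sizes below are hypothetical
  placeholders):
  ```python
  embedding_init = _load_embedding_initializer(
      ckpt_path="/tmp/model.ckpt-0",
      embedding_tensor_name="input_layer/embedding",
      new_vocab_size=100,
      embedding_dim=16,
      old_vocab_file="/tmp/old_vocab.txt",
      new_vocab_file="/tmp/new_vocab.txt")
  embeddings = tf.get_variable(
      "embedding", shape=[100, 16], initializer=embedding_init)
  ```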
Args:
ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
from which the old matrix `Tensor` will be loaded.
embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
new_vocab_size: Number of entries in the new vocab.
embedding_dim: `int` specifying the dimension of the embedding vectors from
the checkpoint. Must match the number of columns in the old embedding
matrix.
old_vocab_file: A scalar `Tensor` of type `string` containing the
path to the old vocabulary file.
new_vocab_file: A scalar `Tensor` of type `string` containing the
path to the new vocabulary file.
    old_vocab_size: The number of entries to consider in the old vocabulary.
      With the default value of -1, the entire old vocabulary file will be
      used. Otherwise, only the first `old_vocab_size` entries will be
      considered for remapping. Must be smaller than the length of
      `old_vocab_file`.
num_oov_buckets: `int` specifying the number of out-of-vocabulary
buckets to use. Must be >= 0.
initializer: Initializer function that accepts a 1-D tensor as the arg to
specify the shape of the returned tensor. If `None`, defaults to using
`truncated_normal_initializer()`.
max_rows_in_memory: `int` specifying the maximum number of rows to load from
the checkpoint at once. If less than or equal to 0, the entire matrix will
be loaded into memory. Setting this arg trades increased disk reads for
lower memory usage.
Returns:
A variable initializer function.
"""
if initializer is None:
# TODO(b/25671353): This should be kept in sync with the stddev used by
# feature_column.py's _EmbeddingColumn.
initializer = init_ops.truncated_normal_initializer(
stddev=1.0 / math.sqrt(embedding_dim))
return _load_and_remap_matrix_initializer(
ckpt_path=ckpt_path,
old_tensor_name=embedding_tensor_name,
new_row_vocab_size=new_vocab_size,
new_col_vocab_size=embedding_dim,
old_row_vocab_size=old_vocab_size,
old_row_vocab_file=old_vocab_file,
new_row_vocab_file=new_vocab_file,
old_col_vocab_file=None,
new_col_vocab_file=None,
num_row_oov_buckets=num_oov_buckets,
num_col_oov_buckets=0,
initializer=initializer,
max_rows_in_memory=max_rows_in_memory)
|
tensorflow-master
|
tensorflow/python/training/checkpoint_ops.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for slot_creator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import slot_creator
class SlotCreatorTest(test.TestCase):
@test_util.run_v1_only("b/120545219")
def testCreateSlotFromVariable(self):
with self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
slot = slot_creator.create_slot(v, v.initialized_value(), name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([1.0, 2.5], self.evaluate(slot))
@test_util.run_deprecated_v1
def testCreateSlotFromTensor(self):
with self.cached_session():
v = constant_op.constant([1.0, 2.5], name="const")
slot = slot_creator.create_slot(v, v * 2, name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([2.0, 5.0], self.evaluate(slot))
@test_util.run_deprecated_v1
def testCreateZerosSlotFromVariable(self):
with self.cached_session():
v = variables.Variable([1.0, 2.5], name="var")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
@test_util.run_v1_only("b/120545219")
def testCreateZerosSlotFromDynamicShapedVariable(self):
with self.cached_session():
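      # Build a variable whose static shape is unknown ([None]) so that the
      # slot creator must fall back to the dynamic shape.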
dyn_shape = constant_op.constant([2], dtype=dtypes.int32)
dyn_shape = array_ops.placeholder_with_default(dyn_shape,
shape=[None])
v = variable_scope.get_variable(
"var",
initializer=random_ops.random_uniform(dyn_shape,
dtype=dtypes.float64),
validate_shape=False)
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("var/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
@test_util.run_deprecated_v1
def testCreateZerosSlotFromTensor(self):
with self.cached_session():
v = constant_op.constant([1.0, 2.5], name="const")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(v, name="slot")
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
@test_util.run_deprecated_v1
def testCreateZerosSlotFromDynamicShapedTensor(self):
with self.cached_session():
v = random_ops.random_uniform([2], dtype=dtypes.float64)
v = array_ops.placeholder_with_default(v, shape=[None], name="const")
with ops.control_dependencies(None):
slot = slot_creator.create_zeros_slot(
v, name="slot", dtype=dtypes.float64)
self.evaluate(variables.global_variables_initializer())
self.assertEqual("const/slot", slot.op.name)
self.assertEqual([2], array_ops.shape(slot).eval())
self.assertEqual(dtypes.float64, slot.dtype.base_dtype)
self.assertAllEqual([0.0, 0.0], self.evaluate(slot))
@test_util.run_v1_only("b/120545219")
def testCreateSlotFromVariableRespectsScope(self):
# See discussion on #2740.
with self.cached_session():
with variable_scope.variable_scope("scope"):
v = variables.Variable([1.0, 2.5], name="var")
slot = slot_creator.create_slot(v, v.initialized_value(), name="slot")
self.assertEqual("scope/scope/var/slot", slot.op.name)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/slot_creator_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for warm_starting_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import six
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import warm_starting_util as ws_util
from tensorflow.python.training.tracking import util as tracking_util
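# Short aliases for the initializers used repeatedly in the tests below.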
ones = init_ops.ones_initializer
norms = init_ops.truncated_normal_initializer
rand = init_ops.random_uniform_initializer
zeros = init_ops.zeros_initializer
class WarmStartingUtilTest(test.TestCase):
def _write_vocab(self, string_values, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, "w") as f:
f.write("\n".join(string_values))
return vocab_file
def _write_checkpoint(self, sess):
self.evaluate(variables.global_variables_initializer())
saver = saver_lib.Saver()
ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
saver.save(sess, ckpt_prefix, global_step=0)
def _create_prev_run_var(self,
var_name,
shape=None,
initializer=None,
partitioner=None):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
var = variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer,
partitioner=partitioner)
self._write_checkpoint(sess)
if partitioner:
self.assertTrue(isinstance(var, variables.PartitionedVariable))
var = var._get_variable_list()
return var, self.evaluate(var)
def _create_prev_run_vars(self,
var_names,
shapes,
initializers):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
all_vars = []
for var_name, shape, initializer in zip(var_names, shapes,
initializers):
all_vars.append(variable_scope.get_variable(
var_name,
shape=shape,
initializer=initializer))
self._write_checkpoint(sess)
return [self.evaluate(var) for var in all_vars]
def _create_dummy_inputs(self):
return {
"sc_int": array_ops.sparse_placeholder(dtypes.int32),
"sc_hash": array_ops.sparse_placeholder(dtypes.string),
"sc_keys": array_ops.sparse_placeholder(dtypes.string),
"sc_vocab": array_ops.sparse_placeholder(dtypes.string),
"real": array_ops.placeholder(dtypes.float32)
}
def _create_linear_model(self, feature_cols, partitioner):
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=feature_cols,
units=1,
cols_to_vars=cols_to_vars)
# Return a dictionary mapping each column to its variable.
return cols_to_vars
def _assert_cols_to_vars(self, cols_to_vars, cols_to_expected_values, sess):
for col, expected_values in six.iteritems(cols_to_expected_values):
for i, var in enumerate(cols_to_vars[col]):
self.assertAllClose(expected_values[i], var.eval(sess))
def testWarmStartVar(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarPrevVarPartitioned(self):
_, weights = self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, fruit_weights.eval(sess))
def testWarmStartVarCurrentVarPartitioned(self):
_, prev_val = self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(fruit_weights)
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarBothVarsPartitioned(self):
_, weights = self._create_prev_run_var(
"old_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
prev_val = np.concatenate([weights[0], weights[1]], axis=0)
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"new_scope/fruit_weights",
shape=[4, 1],
initializer=[[0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
prev_tensor_name, var = ws_util._get_var_info(
fruit_weights, prev_tensor_name="old_scope/fruit_weights")
checkpoint_utils.init_from_checkpoint(self.get_temp_dir(),
{prev_tensor_name: var})
self.evaluate(variables.global_variables_initializer())
fruit_weights = fruit_weights._get_variable_list()
new_val = np.concatenate(
[fruit_weights[0].eval(sess), fruit_weights[1].eval(sess)], axis=0)
self.assertAllClose(prev_val, new_val)
def testWarmStartVarWithVocab(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocab(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabConstrainedOldVocabSize(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
previous_vocab_size=2)
self.evaluate(variables.global_variables_initializer())
# Old vocabulary limited to ['apple', 'banana'].
self.assertAllClose([[0.], [0.], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.], [0.]])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 5,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[2.], [1.5], [1.], [0.5], [0.]],
fruit_weights.eval(sess))
def testWarmStartVarWithColumnVocabPrevVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.], [1.2, 1.5, 0.],
[2.3, 2., 0.]], fruit_output_layer.eval(sess))
def testWarmStartVarWithVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights", initializer=[[0.5], [1.], [1.5], [2.]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(
fruit_weights,
new_vocab_path,
5,
self.get_temp_dir(),
prev_vocab_path,
current_oov_buckets=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabCurrentVarPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStartVarWithVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
self._create_prev_run_var(
"fruit_weights",
shape=[4, 1],
initializer=[[0.5], [1.], [1.5], [2.]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and two new elements.
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_weights = variable_scope.get_variable(
"fruit_weights",
shape=[6, 1],
initializer=[[0.], [0.], [0.], [0.], [0.], [0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_weights, new_vocab_path, 6,
self.get_temp_dir(), prev_vocab_path)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_weights, variables.PartitionedVariable))
fruit_weights_vars = fruit_weights._get_variable_list()
self.assertAllClose([[2.], [1.5], [1.]],
fruit_weights_vars[0].eval(sess))
self.assertAllClose([[0.5], [0.], [0.]],
fruit_weights_vars[1].eval(sess))
def testWarmStartVarWithColumnVocabBothVarsPartitioned(self):
prev_vocab_path = self._write_vocab(["apple", "orange"], "old_vocab")
self._create_prev_run_var(
"fruit_output_layer",
shape=[4, 2],
initializer=[[0.5, 0.3], [1., 0.8], [1.5, 1.2], [2., 2.3]],
partitioner=lambda shape, dtype: [2, 1])
# New vocab with elements in reverse order and one new element.
new_vocab_path = self._write_vocab(["orange", "apple", "banana"],
"new_vocab")
# New session and new graph.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
fruit_output_layer = variable_scope.get_variable(
"fruit_output_layer",
shape=[4, 3],
initializer=[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.],
[0., 0., 0.]],
partitioner=lambda shape, dtype: [2, 1])
ws_util._warm_start_var_with_vocab(fruit_output_layer, new_vocab_path,
current_vocab_size=3,
prev_ckpt=self.get_temp_dir(),
prev_vocab_path=prev_vocab_path,
axis=1)
self.evaluate(variables.global_variables_initializer())
self.assertTrue(
isinstance(fruit_output_layer, variables.PartitionedVariable))
fruit_output_layer_vars = fruit_output_layer._get_variable_list()
self.assertAllClose([[0.3, 0.5, 0.], [0.8, 1.0, 0.]],
fruit_output_layer_vars[0].eval(sess))
self.assertAllClose([[1.2, 1.5, 0.], [2.3, 2., 0.]],
fruit_output_layer_vars[1].eval(sess))
def testWarmStart_ListOfVariables(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
def testWarmStart_ListOfStrings(self):
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var("v1", shape=[10, 1],
initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=["v1"])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
def testWarmStart_ListOfRegexes(self):
# Save checkpoint from which to warm-start.
[prev_v1_val, prev_v1_momentum_val,
prev_v2_val, _] = self._create_prev_run_vars(
var_names=["v1", "v1/Momentum", "v2", "v2/Momentum"],
shapes=[[10, 1]] * 4,
initializers=[ones()] * 4)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
# Initialize with zeros.
v1 = variable_scope.get_variable(
"v1",
shape=[10, 1],
initializer=zeros())
v1_momentum = variable_scope.get_variable(
"v1/Momentum",
shape=[10, 1],
initializer=zeros())
v2 = variable_scope.get_variable(
"v2",
shape=[10, 1],
initializer=zeros())
v2_momentum = variable_scope.get_variable(
"v2/Momentum",
shape=[10, 1],
initializer=zeros())
ws_util.warm_start(self.get_temp_dir(),
# This warm-starts both v1 and v1/Momentum, but only
# v2 (and not v2/Momentum).
vars_to_warm_start=["v1", "v2[^/]"])
self.evaluate(variables.global_variables_initializer())
        # Verify that the selected weights were correctly warm-started (init
        # overridden to ones).
self.assertAllEqual(v1.eval(), prev_v1_val)
self.assertAllEqual(v1_momentum.eval(), prev_v1_momentum_val)
self.assertAllEqual(v2.eval(), prev_v2_val)
self.assertAllEqual(v2_momentum.eval(), np.zeros([10, 1]))
def testWarmStart_SparseColumnIntegerized(self):
# Create feature column.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
# Save checkpoint from which to warm-start.
_, prev_int_val = self._create_prev_run_var(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
# Verify we initialized the values correctly.
self.assertAllEqual(np.ones([10, 1]), prev_int_val)
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_int: [np.zeros([10, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_int], partitioner)
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=".*sc_int.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_int: [prev_int_val]}, sess)
def testWarmStart_SparseColumnHashed(self):
# Create feature column.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
# Save checkpoint from which to warm-start.
_, prev_hash_val = self._create_prev_run_var(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [np.zeros([15, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_hash], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_hash.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_hash: [prev_hash_val]},
sess)
def testWarmStart_SparseColumnVocabulary(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        # Since the old vocab is not explicitly set in WarmStartSettings, the
        # old vocab is assumed to be the same as the new vocab.
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*sc_vocab.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_ExplicitCheckpointFile(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature column.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
# Save checkpoint from which to warm-start.
_, prev_vocab_val = self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([4, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
        # Since the old vocab is not explicitly set in WarmStartSettings, the
        # old vocab is assumed to be the same as the new vocab.
ws_util.warm_start(
# Explicitly provide the file prefix instead of just the dir.
os.path.join(self.get_temp_dir(), "model-0"),
vars_to_warm_start=".*sc_vocab.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [prev_vocab_val]},
sess)
def testWarmStart_SparseColumnVocabularyConstrainedVocabSizes(self):
# Create old vocabulary, and use a size smaller than the total number of
# entries.
old_vocab_path = self._write_vocab(["apple", "guava", "banana"],
"old_vocab")
old_vocab_size = 2 # ['apple', 'guava']
# Create new vocab for sparse column "sc_vocab".
current_vocab_path = self._write_vocab(
["apple", "banana", "guava", "orange"], "current_vocab")
# Create feature column. Only use 2 of the actual entries, resulting in
# ['apple', 'banana'] for the new vocabulary.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=current_vocab_path, vocabulary_size=2)
# Save checkpoint from which to warm-start.
self._create_prev_run_var(
"linear_model/sc_vocab/weights", shape=[2, 1], initializer=ones())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [np.zeros([2, 1])]},
sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([sc_vocab], partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=old_vocab_path,
old_vocab_size=old_vocab_size)
ws_util.warm_start(
ckpt_to_initialize_from=self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. 'banana' isn't in the
# first two entries of the old vocabulary, so it's newly initialized.
self._assert_cols_to_vars(cols_to_vars, {sc_vocab: [[[1], [0]]]}, sess)
def testWarmStart_BucketizedColumn(self):
# Create feature column.
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
# Save checkpoint from which to warm-start.
_, prev_bucket_val = self._create_prev_run_var(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, the weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [np.zeros([5, 1])]}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model([real_bucket], partitioner)
ws_util.warm_start(
self.get_temp_dir(), vars_to_warm_start=".*real_bucketized.*")
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars,
{real_bucket: [prev_bucket_val]}, sess)
def testWarmStart_MultipleCols(self):
# Create vocab for sparse column "sc_vocab".
vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"vocab")
# Create feature columns.
sc_int = fc.categorical_column_with_identity("sc_int", num_buckets=10)
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=vocab_path, vocabulary_size=4)
real = fc.numeric_column("real")
real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])
cross = fc.crossed_column([sc_keys, sc_vocab], hash_bucket_size=20)
all_linear_cols = [sc_int, sc_hash, sc_keys, sc_vocab, real_bucket, cross]
# Save checkpoint from which to warm-start. Also create a bias variable,
# so we can check that it's also warm-started.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
sc_int_weights = variable_scope.get_variable(
"linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
sc_hash_weights = variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"linear_model/sc_keys/weights", shape=[4, 1], initializer=rand())
sc_vocab_weights = variable_scope.get_variable(
"linear_model/sc_vocab/weights", shape=[4, 1], initializer=ones())
real_bucket_weights = variable_scope.get_variable(
"linear_model/real_bucketized/weights",
shape=[5, 1],
initializer=norms())
cross_weights = variable_scope.get_variable(
"linear_model/sc_keys_X_sc_vocab/weights",
shape=[20, 1],
initializer=rand())
bias = variable_scope.get_variable(
"linear_model/bias_weights",
shape=[1],
initializer=rand())
self._write_checkpoint(sess)
(prev_int_val, prev_hash_val, prev_keys_val, prev_vocab_val,
prev_bucket_val, prev_cross_val, prev_bias_val) = sess.run([
sc_int_weights, sc_hash_weights, sc_keys_weights, sc_vocab_weights,
real_bucket_weights, cross_weights, bias
])
partitioner = lambda shape, dtype: [1] * len(shape)
# New graph, new session WITHOUT warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
self.evaluate(variables.global_variables_initializer())
# Without warm-starting, all weights should be initialized using default
# initializer (which is init_ops.zeros_initializer).
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [np.zeros([10, 1])],
sc_hash: [np.zeros([15, 1])],
sc_keys: [np.zeros([4, 1])],
sc_vocab: [np.zeros([4, 1])],
real_bucket: [np.zeros([5, 1])],
cross: [np.zeros([20, 1])],
}, sess)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
"linear_model/sc_vocab/weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_int: [prev_int_val],
sc_hash: [prev_hash_val],
sc_keys: [prev_keys_val],
sc_vocab: [prev_vocab_val],
real_bucket: [prev_bucket_val],
cross: [prev_cross_val],
"bias": [prev_bias_val],
}, sess)
def testWarmStartMoreSettings(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = self.evaluate(sc_keys_weights)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
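    # E.g., the [15, 1] sc_hash weights are split along axis 0 into slices of
    # shape [8, 1] and [7, 1], matching the zeros asserted below.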
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
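        # Remapping sketch: the old vocab [apple, banana, guava, orange] with
        # weights [0.5, 1., 2., 3.] reordered to the new vocab [orange, guava,
        # banana, apple, raspberry, blueberry] yields [3., 2., 1., 0.5, 0., 0.],
        # split into two [3, 1] partitions below.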
self._assert_cols_to_vars(cols_to_vars, {
sc_keys:
np.split(prev_keys_val, 2),
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartMoreSettingsNoPartitioning(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
sc_keys_weights = variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
prev_keys_val = self.evaluate(sc_keys_weights)
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols,
partitioner=None)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*(sc_keys|sc_vocab).*",
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_hash should not be warm-started. Var corresponding to sc_vocab
# should be correctly warm-started after vocab remapping.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [prev_keys_val],
sc_hash: [np.zeros([15, 1])],
sc_vocab: [np.array([[3.], [2.], [1.], [0.5], [0.], [0.]])]
}, sess)
def testWarmStartVarsToWarmstartIsNone(self):
# Create old and new vocabs for sparse column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry",
"blueberry"], "new_vocab")
# Create feature columns.
sc_hash = fc.categorical_column_with_hash_bucket(
"sc_hash", hash_bucket_size=15)
sc_keys = fc.categorical_column_with_vocabulary_list(
"sc_keys", vocabulary_list=["a", "b", "c", "e"])
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
all_linear_cols = [sc_hash, sc_keys, sc_vocab]
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_hash/weights", shape=[15, 1], initializer=norms())
variable_scope.get_variable(
"some_other_name", shape=[4, 1], initializer=rand())
variable_scope.get_variable(
"linear_model/sc_vocab/weights",
initializer=[[0.5], [1.], [2.], [3.]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = self._create_linear_model(all_linear_cols, _partitioner)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path)
ws_util.warm_start(
self.get_temp_dir(),
            # The special value of None here will ensure that only the variable
            # specified in var_name_to_vocab_info (the sc_vocab weights) is
            # warm-started.
vars_to_warm_start=None,
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[sc_vocab]): vocab_info
},
# Even though this is provided, the None value for
# vars_to_warm_start overrides the logic, and this will not be
# warm-started.
var_name_to_prev_var_name={
ws_util._infer_var_name(cols_to_vars[sc_keys]):
"some_other_name"
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# sc_vocab should be correctly warm-started after vocab remapping,
        # and neither of the other two should be warm-started.
self._assert_cols_to_vars(cols_to_vars, {
sc_keys: [np.zeros([2, 1]), np.zeros([2, 1])],
sc_hash: [np.zeros([8, 1]), np.zeros([7, 1])],
sc_vocab: [
np.array([[3.], [2.], [1.]]),
np.array([[0.5], [0.], [0.]])
]
}, sess)
def testWarmStartEmbeddingColumn(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"input_layer/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab_column = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab_column]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.input_layer(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
var_name_to_vocab_info={
ws_util._infer_var_name(cols_to_vars[emb_vocab_column]):
vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab_column should be correctly warm-started after vocab
# remapping. Missing values are filled in with the EmbeddingColumn's
# initializer.
self._assert_cols_to_vars(
cols_to_vars, {
emb_vocab_column: [
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
}, sess)
def testWarmStartEmbeddingColumnLinearModel(self):
# Create old and new vocabs for embedding column "sc_vocab".
prev_vocab_path = self._write_vocab(["apple", "banana", "guava", "orange"],
"old_vocab")
new_vocab_path = self._write_vocab(
["orange", "guava", "banana", "apple", "raspberry", "blueberry"],
"new_vocab")
# Save checkpoint from which to warm-start.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/embedding_weights",
initializer=[[0.5, 0.4], [1., 1.1], [2., 2.2], [3., 3.3]])
variable_scope.get_variable(
"linear_model/sc_vocab_embedding/weights",
initializer=[[0.69], [0.71]])
self._write_checkpoint(sess)
def _partitioner(shape, dtype): # pylint:disable=unused-argument
# Partition each var into 2 equal slices.
partitions = [1] * len(shape)
partitions[0] = min(2, shape.dims[0].value)
return partitions
# Create feature columns.
sc_vocab = fc.categorical_column_with_vocabulary_file(
"sc_vocab", vocabulary_file=new_vocab_path, vocabulary_size=6)
emb_vocab = fc.embedding_column(
categorical_column=sc_vocab,
dimension=2)
all_deep_cols = [emb_vocab]
# New graph, new session with warm-starting.
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
cols_to_vars = {}
with variable_scope.variable_scope("", partitioner=_partitioner):
# Create the variables.
fc.linear_model(
features=self._create_dummy_inputs(),
feature_columns=all_deep_cols,
cols_to_vars=cols_to_vars)
# Construct the vocab_info for the embedding weight.
vocab_info = ws_util.VocabInfo(
new_vocab=sc_vocab.vocabulary_file,
new_vocab_size=sc_vocab.vocabulary_size,
num_oov_buckets=sc_vocab.num_oov_buckets,
old_vocab=prev_vocab_path,
# Can't use constant_initializer with load_and_remap. In practice,
# use a truncated normal initializer.
backup_initializer=init_ops.random_uniform_initializer(
minval=0.42, maxval=0.42))
ws_util.warm_start(
self.get_temp_dir(),
vars_to_warm_start=".*sc_vocab.*",
var_name_to_vocab_info={
"linear_model/sc_vocab_embedding/embedding_weights": vocab_info
})
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started. Var corresponding to
# emb_vocab should be correctly warm-started after vocab remapping.
# Missing values are filled in with the EmbeddingColumn's initializer.
self._assert_cols_to_vars(
cols_to_vars,
{
emb_vocab: [
# linear weights part 0.
np.array([[0.69]]),
# linear weights part 1.
np.array([[0.71]]),
# embedding_weights part 0.
np.array([[3., 3.3], [2., 2.2], [1., 1.1]]),
# embedding_weights part 1.
np.array([[0.5, 0.4], [0.42, 0.42], [0.42, 0.42]])
]
},
sess)
def testErrorConditions(self):
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
# List of PartitionedVariable is invalid type when warm-starting with vocab.
self.assertRaises(TypeError, ws_util._warm_start_var_with_vocab, [x],
"/tmp", 5, "/tmp", "/tmp")
# Unused variable names raises ValueError.
with ops.Graph().as_default():
with self.cached_session() as sess:
x = variable_scope.get_variable(
"x",
shape=[4, 1],
initializer=ones(),
partitioner=lambda shape, dtype: [2, 1])
self._write_checkpoint(sess)
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_vocab_info={"y": ws_util.VocabInfo("", 1, 0, "")})
self.assertRaises(
ValueError,
ws_util.warm_start,
self.get_temp_dir(),
var_name_to_prev_var_name={"y": "y2"})
def testWarmStartFromObjectBasedCheckpoint(self):
prev_val = [[0.5], [1.], [1.5], [2.]]
with ops.Graph().as_default() as g:
with self.session(graph=g):
prev_var = variable_scope.get_variable(
"fruit_weights",
initializer=prev_val)
self.evaluate(variables.global_variables_initializer())
# Save object-based checkpoint.
tracking_util.Checkpoint(v=prev_var).save(
os.path.join(self.get_temp_dir(), "checkpoint"))
with ops.Graph().as_default() as g:
with self.session(graph=g):
fruit_weights = variable_scope.get_variable(
"fruit_weights", initializer=[[0.], [0.], [0.], [0.]])
ws_util.warm_start(self.get_temp_dir())
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(prev_val, self.evaluate(fruit_weights))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/warm_starting_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for optimizers."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_filtered_grad_fn(grad_fn):
# `distributed_context.join()` requires that its arguments are parallel
# across threads, and in particular that `grads_and_vars` has the same
# variables in the same order.
# When computing gradients in eager mode with multiple threads, you
# can get extra variables with a gradient of `None`. This happens when
# those variables are accessed in another thread during the gradient
# computation. To get a consistent set of variables, we filter out
# those with `None` gradients.
def filtered_grad_fn(*args, **kwargs):
return [(g, v) for g, v in grad_fn(*args, **kwargs) if g is not None]
return filtered_grad_fn
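# Usage sketch (hypothetical values): if grad_fn() returns
# [(None, v1), (g2, v2)], the wrapped function drops the pair with the `None`
# gradient and returns [(g2, v2)].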
def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
de-duplicated version of `indices` and `summed_values` contains the sum of
`values` slices associated with each unique index.
"""
unique_indices, new_index_positions = array_ops.unique(indices)
summed_values = math_ops.unsorted_segment_sum(
values, new_index_positions,
array_ops.shape(unique_indices)[0])
return (summed_values, unique_indices)
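# Worked example (hypothetical values): values=[[1.], [2.], [3.]] with
# indices=[0, 2, 0] sums the two rows sharing index 0, returning
# summed_values=[[4.], [2.]] and unique_indices=[0, 2].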
def _var_key(var):
# TODO(ashankar): Consolidate handling for eager and graph
if hasattr(var, "op"):
return (var.op.graph, var.op.name)
return var._unique_id # pylint: disable=protected-access
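# E.g., a graph variable whose op is named "w" keys to (graph, "w"), so
# same-named variables in different graphs stay distinct; eager variables fall
# back to their `_unique_id`.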
@six.add_metaclass(abc.ABCMeta)
class _OptimizableVariable(object):
"""Interface for abstracting over variables in the optimizers."""
@abc.abstractmethod
def target(self):
"""Returns the optimization target for this variable."""
raise NotImplementedError("Calling an abstract method.")
@abc.abstractmethod
def update_op(self, optimizer, g):
"""Returns the update ops for updating the variable."""
raise NotImplementedError("Calling an abstract method.")
class _RefVariableProcessor(_OptimizableVariable):
"""Processor for Variable."""
def __init__(self, v):
self._v = v
def __str__(self):
return "<_RefVariableProcessor(%s)>" % self._v
def target(self):
return self._v._ref() # pylint: disable=protected-access
def update_op(self, optimizer, g):
if isinstance(g, ops.Tensor):
update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
else:
assert isinstance(g, ops.IndexedSlices), ("Gradient ", g, " is neither a "
"tensor nor IndexedSlices.")
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
# pylint: disable=protected-access
return optimizer._apply_sparse_duplicate_indices(g, self._v)
class _DenseReadResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _DenseResourceVariableProcessor(_OptimizableVariable):
"""Processor for dense ResourceVariables."""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
# pylint: disable=protected-access
if isinstance(g, ops.IndexedSlices):
if self._v.constraint is not None:
raise RuntimeError(
"Cannot use a constraint function on a sparse variable.")
return optimizer._resource_apply_sparse_duplicate_indices(
g.values, self._v, g.indices)
update_op = optimizer._resource_apply_dense(g, self._v)
if self._v.constraint is not None:
with ops.control_dependencies([update_op]):
return self._v.assign(self._v.constraint(self._v))
else:
return update_op
class _TensorProcessor(_OptimizableVariable):
"""Processor for ordinary Tensors.
Even though a Tensor can't really be updated, sometimes it is useful to
compute the gradients with respect to a Tensor using the optimizer. Updating
the Tensor is, of course, unsupported.
"""
def __init__(self, v):
self._v = v
def target(self):
return self._v
def update_op(self, optimizer, g):
raise NotImplementedError("Trying to update a Tensor ", self._v)
def _get_processor(v):
"""The processor of v."""
if context.executing_eagerly():
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
else:
return _DenseResourceVariableProcessor(v)
if resource_variable_ops.is_resource_variable(v) and not v._in_graph_mode: # pylint: disable=protected-access
# True if and only if `v` was initialized eagerly.
return _DenseResourceVariableProcessor(v)
if v.op.type == "VarHandleOp":
return _DenseResourceVariableProcessor(v)
if isinstance(v, variables.Variable):
return _RefVariableProcessor(v)
if isinstance(v, ops.Tensor):
return _TensorProcessor(v)
raise NotImplementedError("Trying to optimize unsupported type ", v)
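# Dispatch summary: eager Tensors get a _TensorProcessor, resource variables
# (eager, or graph-mode "VarHandleOp") a _DenseResourceVariableProcessor, and
# graph-mode ref Variables a _RefVariableProcessor.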
@tf_export(v1=["train.Optimizer"])
class Optimizer(
# Optimizers inherit from Trackable rather than AutoTrackable
# since they do most of their dependency management themselves (slot
# variables are special-cased, and non-slot variables are keyed to graphs).
trackable.Trackable):
"""Base class for optimizers.
This class defines the API to add Ops to train a model. You never use this
class directly, but instead instantiate one of its subclasses such as
`GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.
### Usage
```python
# Create an optimizer with the desired parameters.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Add Ops to the graph to minimize a cost by updating a list of variables.
# "cost" is a Tensor, and the list of variables contains tf.Variable
# objects.
opt_op = opt.minimize(cost, var_list=<list of variables>)
```
In the training program you will just have to run the returned Op.
```python
# Execute opt_op to do one step of training:
opt_op.run()
```
### Processing gradients before applying them.
Calling `minimize()` takes care of both computing the gradients and
applying them to the variables. If you want to process the gradients
before applying them you can instead use the optimizer in three steps:
1. Compute the gradients with `compute_gradients()`.
2. Process the gradients as you wish.
3. Apply the processed gradients with `apply_gradients()`.
Example:
```python
# Create an optimizer.
opt = GradientDescentOptimizer(learning_rate=0.1)
# Compute the gradients for a list of variables.
grads_and_vars = opt.compute_gradients(loss, <list of variables>)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
opt.apply_gradients(capped_grads_and_vars)
```
### Gating Gradients
Both `minimize()` and `compute_gradients()` accept a `gate_gradients`
argument that controls the degree of parallelism during the application of
the gradients.
The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.
<b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides
the maximum parallelism in execution, at the cost of some non-reproducibility
  in the results. For example, the two gradients of `matmul` depend on the input
values: With `GATE_NONE` one of the gradients could be applied to one of the
inputs _before_ the other gradient is computed resulting in non-reproducible
results.
<b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before
they are used. This prevents race conditions for Ops that generate gradients
for multiple inputs where the gradients depend on the inputs.
<b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed
before any one of them is used. This provides the least parallelism but can
be useful if you want to process all gradients before applying any of them.
### Slots
Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`
allocate and manage additional variables associated with the variables to
train. These are called <i>Slots</i>. Slots have names and you can ask the
optimizer for the names of the slots that it uses. Once you have a slot name
you can ask the optimizer for the variable it created to hold the slot value.
  This can be useful if you want to log or debug a training algorithm, report stats
about the slots, etc.
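  For example (a minimal sketch; `cost` and `var` are hypothetical):
  ```python
  opt = MomentumOptimizer(learning_rate=0.1, momentum=0.9)
  opt_op = opt.minimize(cost, var_list=[var])
  # Slot names this optimizer allocated, e.g. ["momentum"].
  print(opt.get_slot_names())
  # The extra variable the optimizer keeps for `var`.
  momentum_slot = opt.get_slot(var, "momentum")
  ```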
"""
# Values for gate_gradients.
GATE_NONE = 0
GATE_OP = 1
GATE_GRAPH = 2
def __init__(self, use_locking, name):
"""Create a new Optimizer.
This must be called by the constructors of subclasses.
Args:
      use_locking: Bool. If True, use locks to prevent concurrent updates
to variables.
name: A non-empty string. The name to use for accumulators created
for the optimizer.
Raises:
ValueError: If name is malformed.
"""
if not name:
raise ValueError("Must specify the optimizer name")
self._use_locking = use_locking
self._name = name
# Dictionary of slots.
# {slot_name :
# {_var_key(variable_to_train): slot_for_the_variable, ... },
# ... }
self._slots = {}
self._non_slot_dict = {}
# For implementing Trackable. Stores information about how to restore
# slot variables which have not yet been created
# (trackable._CheckpointPosition objects).
# {slot_name :
# {_var_key(variable_to_train): [checkpoint_position, ... ], ... },
# ... }
self._deferred_slot_restorations = {}
# TODO(isaprykin): When using a DistributionStrategy, and when an
# optimizer is created in each replica, it might be dangerous to
# rely on some Optimizer methods. When such methods are called on a
# per-replica optimizer, an exception needs to be thrown. We do
# allow creation per-replica optimizers however, because the
# compute_gradients()->apply_gradients() sequence is safe.
def get_name(self):
return self._name
def minimize(self, loss, global_step=None, var_list=None,
gate_gradients=GATE_OP, aggregation_method=None,
colocate_gradients_with_ops=False, name=None,
grad_loss=None):
"""Add operations to minimize `loss` by updating `var_list`.
    This method simply combines calls to `compute_gradients()` and
    `apply_gradients()`. If you want to process the gradients before applying
    them, call `compute_gradients()` and `apply_gradients()` explicitly instead
    of using this function.
Args:
loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list or tuple of `Variable` objects to update to
minimize `loss`. Defaults to the list of variables collected in
the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
was not `None`, that operation also increments `global_step`.
Raises:
ValueError: If some of the variables are not `Variable` objects.
@compatibility(eager)
When eager execution is enabled, `loss` should be a Python function that
takes no arguments and computes the value to be minimized. Minimization (and
gradient computation) is done with respect to the elements of `var_list` if
not None, else with respect to any trainable variables created during the
execution of the `loss` function. `gate_gradients`, `aggregation_method`,
`colocate_gradients_with_ops` and `grad_loss` are ignored when eager
execution is enabled.
@end_compatibility
"""
grads_and_vars = self.compute_gradients(
loss, var_list=var_list, gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
vars_with_grad = [v for g, v in grads_and_vars if g is not None]
if not vars_with_grad:
raise ValueError(
"No gradients provided for any variable, check your graph for ops"
" that do not support gradients, between variables %s and loss %s." %
([str(v) for _, v in grads_and_vars], loss))
return self.apply_gradients(grads_and_vars, global_step=global_step,
name=name)
def compute_gradients(self, loss, var_list=None,
gate_gradients=GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize or a callable taking
no arguments which returns the value to minimize. When eager execution
is enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
      TypeError: If `var_list` contains anything other than `Variable` objects.
ValueError: If some arguments are invalid.
RuntimeError: If called with eager execution enabled and `loss` is
not callable.
@compatibility(eager)
When eager execution is enabled, `gate_gradients`, `aggregation_method`,
and `colocate_gradients_with_ops` are ignored.
@end_compatibility
"""
if callable(loss):
with backprop.GradientTape() as tape:
if var_list is not None:
tape.watch(var_list)
loss_value = loss()
# Scale loss if using a "mean" loss reduction and multiple replicas.
# Have to be careful to call distribute_lib.get_loss_reduction()
# *after* loss() is evaluated, so we know what loss reduction it uses.
# TODO(josh11b): Test that we handle weight decay in a reasonable way.
loss_value = self._scale_loss(loss_value)
if var_list is None:
var_list = tape.watched_variables()
# TODO(jhseu): Figure out why GradientTape's gradients don't require loss
# to be executed.
with ops.control_dependencies([loss_value]):
grads = tape.gradient(loss_value, var_list, grad_loss)
return list(zip(grads, var_list))
# Non-callable/Tensor loss case
if context.executing_eagerly():
raise RuntimeError(
"`loss` passed to Optimizer.compute_gradients should "
"be a function when eager execution is enabled.")
# Scale loss if using a "mean" loss reduction and multiple replicas.
loss = self._scale_loss(loss)
if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP,
Optimizer.GATE_GRAPH]:
raise ValueError("gate_gradients must be one of: Optimizer.GATE_NONE, "
"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s" %
gate_gradients)
self._assert_valid_dtypes([loss])
if grad_loss is not None:
self._assert_valid_dtypes([grad_loss])
if var_list is None:
var_list = (
variables.trainable_variables() +
ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))
else:
var_list = nest.flatten(var_list)
# pylint: disable=protected-access
var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)
# pylint: enable=protected-access
processors = [_get_processor(v) for v in var_list]
if not var_list:
raise ValueError("No variables to optimize.")
var_refs = [p.target() for p in processors]
grads = gradients.gradients(
loss, var_refs, grad_ys=grad_loss,
gate_gradients=(gate_gradients == Optimizer.GATE_OP),
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
if gate_gradients == Optimizer.GATE_GRAPH:
grads = control_flow_ops.tuple(grads)
grads_and_vars = list(zip(grads, var_list))
self._assert_valid_dtypes(
[v for g, v in grads_and_vars
if g is not None and v.dtype != dtypes.resource])
return grads_and_vars
@staticmethod
def _scale_loss(loss_value):
ops.get_default_graph()._is_loss_scaled_by_optimizer = False # pylint: disable=protected-access
if distribute_lib.get_loss_reduction() == ds_reduce_util.ReduceOp.MEAN:
num_replicas = distribute_ctx.get_strategy().num_replicas_in_sync
if num_replicas > 1:
loss_value *= (1. / num_replicas)
ops.get_default_graph()._is_loss_scaled_by_optimizer = True # pylint: disable=protected-access
return loss_value
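  # Scaling sketch: with a "mean" loss reduction and 4 in-sync replicas, a
  # per-replica loss of 8.0 is scaled by 1/4 to 2.0, so that summing the
  # per-replica gradients reproduces the gradient of the global mean loss.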
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
      name: Optional name for the returned operation. Defaults to the
        name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
RuntimeError: If you should use `_distributed_apply()` instead.
"""
# This is a default implementation of apply_gradients() that can be shared
# by most optimizers. It relies on the subclass implementing the following
# methods: _create_slots(), _prepare(), _apply_dense(), and _apply_sparse().
# TODO(isaprykin): Get rid of `has_strategy()` check by
# always calling _distributed_apply(), using the default distribution
# as needed.
if distribute_ctx.has_strategy():
# Handle DistributionStrategy case.
if distribute_ctx.in_cross_replica_context():
raise RuntimeError("Use `_distributed_apply()` instead of "
"`apply_gradients()` in a cross-replica context.")
grads_and_vars = get_filtered_grad_fn(lambda: grads_and_vars)()
return distribute_ctx.get_replica_context().merge_call(
self._distributed_apply, args=(grads_and_vars, global_step, name))
# No DistributionStrategy case.
grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.
if not grads_and_vars:
raise ValueError("No variables provided.")
converted_grads_and_vars = []
for g, v in grads_and_vars:
if g is not None:
try:
# Convert the grad to Tensor or IndexedSlices if necessary.
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError(
"Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
p = _get_processor(v)
converted_grads_and_vars.append((g, v, p))
converted_grads_and_vars = tuple(converted_grads_and_vars)
var_list = [v for g, v, _ in converted_grads_and_vars if g is not None]
if not var_list:
raise ValueError("No gradients provided for any variable: %s." %
([str(v) for _, v, _ in converted_grads_and_vars],))
with ops.init_scope():
self._create_slots(var_list)
update_ops = []
with ops.name_scope(name, self._name) as name:
self._prepare()
for grad, var, processor in converted_grads_and_vars:
if grad is None:
continue
# We colocate all ops created in _apply_dense or _apply_sparse
# on the same device as the variable.
# TODO(apassos): figure out how to get the variable name here.
if (context.executing_eagerly() or
isinstance(var, resource_variable_ops.BaseResourceVariable)
and not var._in_graph_mode): # pylint: disable=protected-access
scope_name = ""
else:
scope_name = var.op.name
with ops.name_scope("update_" + scope_name), ops.colocate_with(var):
update_ops.append(processor.update_op(self, grad))
if global_step is None:
apply_updates = self._finish(update_ops, name)
else:
with ops.control_dependencies([self._finish(update_ops, "update")]):
with ops.colocate_with(global_step):
if isinstance(
global_step, resource_variable_ops.BaseResourceVariable):
# TODO(apassos): the implicit read in assign_add is slow; consider
# making it less so.
apply_updates = resource_variable_ops.assign_add_variable_op(
global_step.handle,
ops.convert_to_tensor(1, dtype=global_step.dtype),
name=name)
else:
apply_updates = state_ops.assign_add(global_step, 1, name=name)
if not context.executing_eagerly():
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def _distributed_apply(self,
distribution,
grads_and_vars,
global_step=None,
name=None):
"""A version of `apply_gradients` for cross-replica context.
This is a version of `apply_gradients()` for when you are using a
`DistributionStrategy` and are in a cross-replica context. If in a
replica context, use `apply_gradients()` as normal.
Args:
distribution: A `DistributionStrategy` object.
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`, and then aggregated across replicas.
global_step: Optional (mirrored) `Variable` to increment by one
after the variables have been updated.
      name: Optional name for the returned operation. Defaults to the
name passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients across all
replicas. If `global_step` was not None, that operation also
      increments `global_step`.
"""
reduced_grads = distribution.extended.batch_reduce_to(
ds_reduce_util.ReduceOp.SUM, grads_and_vars)
var_list = [v for _, v in grads_and_vars]
grads_and_vars = zip(reduced_grads, var_list)
# Note that this is called in a cross-replica context.
with ops.init_scope():
self._create_slots(var_list)
def update(v, g):
"""Apply gradients to a replica variable."""
assert v is not None
try:
# Convert the grad to Tensor or IndexedSlices if necessary.
g = ops.convert_to_tensor_or_indexed_slices(g)
except TypeError:
raise TypeError("Gradient must be convertible to a Tensor"
" or IndexedSlices, or None: %s" % g)
if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):
raise TypeError(
"Gradient must be a Tensor, IndexedSlices, or None: %s" % g)
p = _get_processor(v)
if context.executing_eagerly() or (
resource_variable_ops.is_resource_variable(v) and
not v._in_graph_mode): # pylint: disable=protected-access
scope_name = v.name.split(":")[0]
else:
scope_name = v.op.name
      # device_policy is set because non-mirrored tensors will be read in
      # `update_op`; for example, `lr_t`, `beta1_t`, and `beta2_t` are read
      # inside `_resource_apply_dense`.
with ops.name_scope("update_" + scope_name):
return p.update_op(self, g)
with ops.name_scope(name, self._name) as name:
self._prepare()
update_ops = [
op
for grad, var in grads_and_vars
for op in distribution.extended.update(
var, update, args=(grad,), group=False)
]
def finish(self, update_ops):
return self._finish(update_ops, "update")
non_slot_devices = distribution.extended.non_slot_devices(var_list)
finish_updates = distribution.extended.update_non_slot(
non_slot_devices, finish, args=(self, update_ops), group=False)
if global_step is None:
apply_updates = distribution.group(finish_updates, name=name)
else:
with ops.control_dependencies(finish_updates):
apply_updates = distribution.extended.update(
global_step, state_ops.assign_add, args=(1,),
kwargs={"name": name})
if not context.executing_eagerly():
if isinstance(apply_updates, ops.Tensor):
apply_updates = apply_updates.op
train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
if apply_updates not in train_op:
train_op.append(apply_updates)
return apply_updates
def get_slot(self, var, name):
"""Return a slot named `name` created for `var` by the Optimizer.
Some `Optimizer` subclasses use additional variables. For example
`Momentum` and `Adagrad` use variables to accumulate updates. This method
gives access to these `Variable` objects if for some reason you need them.
Use `get_slot_names()` to get the list of slot names created by the
`Optimizer`.
Args:
var: A variable passed to `minimize()` or `apply_gradients()`.
name: A string.
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
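
    Example (a minimal sketch; `loss` and `var` are assumed to already
    exist):

    ```python
    opt = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
    train_op = opt.minimize(loss)  # creates the "momentum" slot variables
    accumulator = opt.get_slot(var, "momentum")
    ```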
"""
# pylint: disable=protected-access
named_slots = self._slots.get(name, None)
if not named_slots:
return None
if hasattr(var, "_distributed_container"):
# NOTE: If this isn't patched, then there is no `handle` in
# `_resource_apply_dense`.
distributed_container = var._distributed_container()
assert distributed_container is not None
if ops.executing_eagerly_outside_functions():
key = distributed_container._unique_id
else:
key = (distributed_container.graph, distributed_container._shared_name)
# pylint: enable=protected-access
mirrored_slot = named_slots.get(key, None)
if mirrored_slot is None: return None
return mirrored_slot.get(device=var.device)
return named_slots.get(_var_key(var), None)
def get_slot_names(self):
"""Return a list of the names of slots created by the `Optimizer`.
See `get_slot()`.
Returns:
A list of strings.
"""
return sorted(self._slots.keys())
def variables(self):
"""A list of variables which encode the current state of `Optimizer`.
Includes slot variables and additional global variables created by the
optimizer in the current default graph.
Returns:
A list of variables.
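
    Example (a minimal sketch; `loss` is assumed to already exist):

    ```python
    opt = tf.compat.v1.train.MomentumOptimizer(0.01, momentum=0.5)
    opt.minimize(loss)
    state = opt.variables()  # the "momentum" slots, one per trained variable
    ```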
"""
current_graph = ops.get_default_graph()
def _from_current_graph(variable):
if variable._in_graph_mode: # pylint: disable=protected-access
return variable.op.graph is current_graph
else:
# No variable.op in eager mode. We don't expect lots of eager graphs,
# but behavior should be consistent with graph mode.
return variable._graph_key == current_graph._graph_key # pylint: disable=protected-access
optimizer_variables = [v for v in self._non_slot_variables()
if _from_current_graph(v)]
for _, variable_dict in self._slots.items():
for _, slot_for_variable in variable_dict.items():
if _from_current_graph(slot_for_variable):
optimizer_variables.append(slot_for_variable)
# Sort variables by name so that the return is deterministic.
return sorted(optimizer_variables, key=lambda v: v.name)
def _create_non_slot_variable(self, initial_value, name, colocate_with):
"""Add an extra variable, not associated with a slot."""
# Recommendation: Use OptimizerV2 if your optimizer uses non-slot variables.
eager = context.executing_eagerly()
graph = None if eager else colocate_with.graph
key = (name, graph)
v = self._non_slot_dict.get(key, None)
if v is None:
self._maybe_initialize_trackable()
distribution_strategy = distribute_ctx.get_strategy()
with distribution_strategy.extended.colocate_vars_with(colocate_with):
if eager:
restored_initial_value = self._preload_simple_restoration(
name=name, shape=None)
if restored_initial_value is not None:
initial_value = restored_initial_value
v = variable_scope.variable(
initial_value, name=name, trainable=False,
use_resource=resource_variable_ops.is_resource_variable(
colocate_with))
# Restore this variable by name if necessary, but don't add a
# Trackable dependency. Optimizers return the current graph's
# non-slot variables from _checkpoint_dependencies explicitly rather
# than unconditionally adding dependencies (since there may be multiple
# non-slot variables with the same name in different graphs, trying to
# save all of them would result in errors).
self._handle_deferred_dependencies(name=name, trackable=v)
self._non_slot_dict[key] = v
return v
@property
def _checkpoint_dependencies(self):
"""From Trackable. Gather graph-specific non-slot variables to save."""
current_graph_non_slot_variables = []
current_graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
for (name, _), variable_object in sorted(self._non_slot_dict.items(),
# Avoid comparing graphs
key=lambda item: item[0][0]):
if variable_object._graph_key == current_graph_key: # pylint: disable=protected-access
current_graph_non_slot_variables.append(
trackable.TrackableReference(
name=name, ref=variable_object))
return (super(Optimizer, self)._checkpoint_dependencies
+ current_graph_non_slot_variables)
def _lookup_dependency(self, name):
"""From Trackable. Find a non-slot variable in the current graph."""
unconditional = super(Optimizer, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
graph = None if context.executing_eagerly() else ops.get_default_graph()
return self._get_non_slot_variable(name, graph=graph)
def _get_non_slot_variable(self, name, graph=None):
non_slot = self._non_slot_dict.get((name, graph), None)
if hasattr(non_slot, "_distributed_container"):
# This is a mirrored non-slot. In order to enable code like `_finish`
# to assign to a non-slot, return the current context replica.
return non_slot.get()
else:
return non_slot
def _non_slot_variables(self):
"""Additional variables created by the `Optimizer`.
Returns:
A list or tuple of variables.
"""
return self._non_slot_dict.values()
def _assert_valid_dtypes(self, tensors):
"""Asserts tensors are all valid types (see `_valid_dtypes`).
Args:
tensors: Tensors to check.
Raises:
ValueError: If any tensor is not a valid type.
"""
valid_dtypes = self._valid_dtypes()
for t in tensors:
dtype = t.dtype.base_dtype
if dtype not in valid_dtypes:
raise ValueError(
"Invalid type %r for %s, expected: %s." % (
dtype, t.name, [v for v in valid_dtypes]))
# --------------
# Methods to be implemented by subclasses if they want to use the
# inherited implementation of apply_gradients() or compute_gradients().
# --------------
def _valid_dtypes(self):
"""Valid types for loss, variables and gradients.
Subclasses should override to allow other float types.
Returns:
Valid types for loss, variables and gradients.
"""
return set(
[dtypes.float16, dtypes.bfloat16, dtypes.float32, dtypes.float64])
def _create_slots(self, var_list):
"""Create all slots needed by the variables.
Args:
var_list: A list of `Variable` objects.
"""
# No slots needed by default
pass
def _prepare(self):
"""Create all needed tensors before applying gradients.
This is called with the name_scope using the "name" that
users have chosen for the application of gradients.
"""
pass
def _apply_dense(self, grad, var):
"""Add ops to apply dense gradients to `var`.
Args:
grad: A `Tensor`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _resource_apply_dense(self, grad, handle):
"""Add ops to apply dense gradients to the variable `handle`.
Args:
grad: a `Tensor` representing the gradient.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
Returns:
An `Operation` which updates the value of the variable.
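
    A minimal sketch of an override, modeled on plain gradient descent
    (it assumes the subclass set up `self._learning_rate_tensor` and
    `self._use_locking`, e.g. in `_prepare()` and the constructor):

    ```python
    def _resource_apply_dense(self, grad, handle):
      lr = math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype)
      return training_ops.resource_apply_gradient_descent(
          handle, lr, grad, use_locking=self._use_locking)
    ```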
"""
raise NotImplementedError()
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
"""Add ops to apply sparse gradients to `handle`, with repeated indices.
Optimizers which override this method must deal with repeated indices. See
the docstring of `_apply_sparse_duplicate_indices` for details. By default
the correct behavior, to sum non-unique indices and their associated
gradients, is enforced by first pre-processing `grad` and `indices` and
passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
with duplicate indices may instead override this method to avoid the
overhead of summing.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices may be repeated.
Returns:
An `Operation` which updates the value of the variable.
"""
summed_grad, unique_indices = _deduplicate_indexed_slices(
values=grad, indices=indices)
return self._resource_apply_sparse(summed_grad, handle, unique_indices)
def _resource_apply_sparse(self, grad, handle, indices):
"""Add ops to apply sparse gradients to the variable `handle`.
Similar to `_apply_sparse`, the `indices` argument to this method has been
de-duplicated. Optimizers which deal correctly with non-unique indices may
instead override `_resource_apply_sparse_duplicate_indices` to avoid this
overhead.
Args:
grad: a `Tensor` representing the gradient for the affected indices.
handle: a `Tensor` of dtype `resource` which points to the variable
to be updated.
indices: a `Tensor` of integral type representing the indices for
which the gradient is nonzero. Indices are unique.
Returns:
An `Operation` which updates the value of the variable.
"""
raise NotImplementedError()
def _apply_sparse_duplicate_indices(self, grad, var):
"""Add ops to apply sparse gradients to `var`, with repeated sparse indices.
Optimizers which override this method must deal with IndexedSlices objects
such as the following:
IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])
The correct interpretation is:
IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])
Many optimizers deal incorrectly with repeated indices when updating based
on sparse gradients (e.g. summing squares rather than squaring the sum, or
applying momentum terms multiple times). Adding first is always the correct
behavior, so this is enforced here by reconstructing the IndexedSlices to
have only unique indices, then calling _apply_sparse.
Optimizers which deal correctly with repeated indices may instead override
this method to avoid the overhead of summing indices.
Args:
grad: `IndexedSlices`.
var: A `Variable` object.
Returns:
An `Operation`.
"""
summed_values, unique_indices = _deduplicate_indexed_slices(
values=grad.values, indices=grad.indices)
gradient_no_duplicate_indices = ops.IndexedSlices(
indices=unique_indices,
values=summed_values,
dense_shape=grad.dense_shape)
return self._apply_sparse(gradient_no_duplicate_indices, var)
def _apply_sparse(self, grad, var):
"""Add ops to apply sparse gradients to `var`.
The IndexedSlices object passed to `grad` in this function is by default
pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
indices (see its docstring for details). Optimizers which can tolerate or
have correct special cases for duplicate sparse indices may override
`_apply_sparse_duplicate_indices` instead of this function, avoiding that
overhead.
Args:
grad: `IndexedSlices`, with no repeated indices.
var: A `Variable` object.
Returns:
An `Operation`.
"""
raise NotImplementedError()
def _finish(self, update_ops, name_scope):
"""Do what is needed to finish the update.
This is called with the `name_scope` using the "name" that
users have chosen for the application of gradients.
Args:
update_ops: List of `Operation` objects to update variables. This list
contains the values returned by the `_apply_dense()` and
`_apply_sparse()` calls.
name_scope: String. Name to use for the returned operation.
Returns:
The operation to apply updates.
"""
return control_flow_ops.group(*update_ops, name=name_scope)
# --------------
# Utility methods for subclasses.
# --------------
def _slot_dict(self, slot_name):
"""Returns a dict for caching slots created under the given name.
Args:
slot_name: Name for the slot.
Returns:
A dict that maps primary `Variable` objects to the slot created
for that variable, under the given slot name.
"""
named_slots = self._slots.get(slot_name, None)
if named_slots is None:
named_slots = {}
self._slots[slot_name] = named_slots
return named_slots
def _get_or_make_slot(self, var, val, slot_name, op_name):
"""Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,
slot_name, op_name):
"""Find or create a slot for a variable, using an Initializer.
Args:
var: A `Variable` object.
initializer: An `Initializer`. The initial value of the slot.
shape: Shape of the initial value of the slot.
dtype: Type of the value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot_with_initializer(
var, initializer, shape, dtype, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
def _zeros_slot(self, var, slot_name, op_name):
"""Find or create a slot initialized with 0.0.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_zeros_slot(var, op_name)
self._restore_slot_variable(
slot_name=slot_name, variable=var,
slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
# --------------
# For implementing the Trackable interface.
# --------------
def _restore_slot_variable(self, slot_name, variable, slot_variable):
"""Restore a newly created slot variable's value."""
variable_key = _var_key(variable)
deferred_restorations = self._deferred_slot_restorations.get(
slot_name, {}).pop(variable_key, [])
# Iterate over restores, highest restore UID first to minimize the number
# of assignments.
deferred_restorations.sort(key=lambda position: position.restore_uid,
reverse=True)
for checkpoint_position in deferred_restorations:
checkpoint_position.restore(slot_variable)
def _create_or_restore_slot_variable(
self, slot_variable_position, slot_name, variable):
"""Restore a slot variable's value, possibly creating it.
Called when a variable which has an associated slot variable is created or
restored. When executing eagerly, we create the slot variable with a
restoring initializer.
No new variables are created when graph building. Instead,
_restore_slot_variable catches these after normal creation and adds restore
ops to the graph. This method is nonetheless important when graph building
for the case when a slot variable has already been created but `variable`
has just been added to a dependency graph (causing us to realize that the
slot variable needs to be restored).
Args:
slot_variable_position: A `trackable._CheckpointPosition` object
indicating the slot variable `Trackable` object to be restored.
slot_name: The name of this `Optimizer`'s slot to restore into.
variable: The variable object this slot is being created for.
"""
named_slots = self._slot_dict(slot_name)
variable_key = _var_key(variable)
slot_variable = named_slots.get(variable_key, None)
if (slot_variable is None and context.executing_eagerly() and
slot_variable_position.is_simple_variable()
# Defer slot variable creation if there is an active variable creator
# scope. Generally we'd like to eagerly create/restore slot variables
# when possible, but this may mean that scopes intended to catch
# `variable` also catch its eagerly created slot variable
# unintentionally (specifically make_template would add a dependency on
# a slot variable if not for this case). Deferring is mostly harmless
# (aside from double initialization), and makes variable creator scopes
# behave the same way they do when graph building.
and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access
initializer = trackable.CheckpointInitialValue(
checkpoint_position=slot_variable_position)
slot_variable = self._get_or_make_slot(
var=variable,
val=initializer,
slot_name=slot_name,
op_name=self._name)
# Slot variables are not owned by any one object (because we don't want to
# save the slot variable if the optimizer is saved without the non-slot
# variable, or if the non-slot variable is saved without the optimizer;
# it's a dependency hypergraph with edges of the form (optimizer, non-slot
# variable, variable)). So we don't _track_ slot variables anywhere, and
# instead special-case this dependency and otherwise pretend it's a normal
# graph.
if slot_variable is not None:
# If we've either made this slot variable, or if we've pulled out an
# existing slot variable, we should restore it.
slot_variable_position.restore(slot_variable)
else:
# We didn't make the slot variable. Defer restoring until it gets created
# normally. We keep a list rather than the one with the highest restore
# UID in case slot variables have their own dependencies, in which case
# those could differ between restores.
self._deferred_slot_restorations.setdefault(
slot_name, {}).setdefault(variable_key, []).append(
slot_variable_position)
def _call_if_callable(self, param):
"""Call the function if param is callable."""
return param() if callable(param) else param
|
tensorflow-master
|
tensorflow/python/training/optimizer.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
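    # NumPy reference for the Nesterov momentum update; the four incremental
    # assignments below are algebraically equivalent to:
    #   accum_t = momentum * accum + g
    #   var_t   = var - lr * (g + momentum * accum_t)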
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, name="var1_%d" % i)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
momentum = lambda: 0.9
if not use_callable_params:
learning_rate = learning_rate()
momentum = momentum()
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
      self.assertEqual(slot0.get_shape(), var0.get_shape())
      slot1 = mom_opt.get_slot(var1, "momentum")
      self.assertEqual(slot1.get_shape(), var1.get_shape())
if not context.executing_eagerly():
self.assertFalse(slot0 in variables.trainable_variables())
self.assertFalse(slot1 in variables.trainable_variables())
      # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
if not context.executing_eagerly():
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
if context.executing_eagerly():
mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
else:
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testBasic(self):
with self.cached_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
def testVariablesAcrossGraphs(self):
optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
with ops.Graph().as_default():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var0")
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var1")
loss = math_ops.reduce_sum(var0 + var1)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var0")
self.assertStartsWith(optimizer_variables[1].name, "var1")
      self.assertEqual(2, len(optimizer_variables))
with ops.Graph().as_default():
var2 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var2")
var3 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var3")
loss = math_ops.reduce_sum(var2 + var3)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var2")
self.assertStartsWith(optimizer_variables[1].name, "var3")
      self.assertEqual(2, len(optimizer_variables))
@test_util.run_deprecated_v1
def testNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
cost = 5 * var0 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
grads = []
for t in range(1, 5):
grads.append(var0_np * 10)
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
loss = 5 * var0 * var0 + 3 * var1
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
x_feed = array_ops.placeholder(dtype)
y_feed = ops.IndexedSlices(
x_feed, constant_op.constant([0, 1]), constant_op.constant([2]))
grads_and_vars = [(y_feed, var0), (constant_op.constant(
[3.0, 3.0], dtype=dtype), var1)]
opt_update = mom_op.apply_gradients(grads_and_vars)
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_update.run(feed_dict={x_feed: grads[t - 1]})
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
# This test invokes the ResourceSparseApplyMomentum operation, which
# did not have a registered GPU kernel as of April 2018. With graph
# execution, the placement algorithm notices this and automatically
# places the variable in CPU (host) memory. With eager execution,
# the variable would be placed in GPU memory if available, which
# would then conflict with the future invocation of the
# ResourceSparseApplyMomentum operation.
# To work around this discrepancy, for now we force the variable
# to be placed on CPU.
with ops.device("/cpu:0"):
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loss():
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
return pred * pred
# pylint: enable=cell-var-from-loop
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
  def testMinimizeWith2DIndicesForEmbeddingLookup(self):
# This test invokes the ResourceSparseApplyMomentum operation, which
# did not have a registered GPU kernel as of April 2018. With graph
# execution, the placement algorithm notices this and automatically
# places the variable in CPU (host) memory. With eager execution,
# the variable would be placed in GPU memory if available, which
# would then conflict with the future invocation of the
# ResourceSparseApplyMomentum operation.
# To work around this discrepancy, for now we force the variable
# to be placed on CPU.
with ops.device("/cpu:0"):
var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
def loss():
return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(sgd_op)
self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0))
@test_util.run_deprecated_v1
def testTensorLearningRateAndMomentum(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        self.assertFalse(slot0 in variables.trainable_variables())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
    The return values were generated from the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
@test_util.run_deprecated_v1
def testLikeDistBeliefMom01(self):
with self.cached_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), self.evaluate(var0))
@test_util.run_deprecated_v1
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = ops.IndexedSlices(
constant_op.constant(
[[.1, .1]], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([4, 2]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]),
constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], self.evaluate(var0)[0])
self.assertAllClose([0, 0], self.evaluate(var0)[1])
self.assertAllClose([1, 1], self.evaluate(var1)[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([.1, .1]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]),
self.evaluate(var1)[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]),
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]),
self.evaluate(var1)[2])
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
        self.assertEqual(slot0.get_shape(), var0.get_shape())
        slot1 = mom_opt.get_slot(var1, "momentum")
        self.assertEqual(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/momentum_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rmsprop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import itertools
import math
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import rmsprop
_DATA_TYPES = [dtypes.half, dtypes.float32]
_TEST_PARAM_VALUES = [
# learning_rate, decay, momentum, epsilon, centered, use_resource
[0.5, 0.9, 0.0, 1e-3, True, False],
[0.5, 0.9, 0.0, 1e-3, False, False],
[0.5, 0.9, 0.0, 1e-3, True, True],
[0.5, 0.9, 0.0, 1e-3, False, True],
[0.1, 0.9, 0.0, 1e-3, True, False],
[0.5, 0.95, 0.0, 1e-3, False, False],
[0.5, 0.95, 0.0, 1e-5, True, False],
[0.5, 0.95, 0.9, 1e-5, True, False],
]
_TESTPARAMS = [
[data_type] + values
for data_type, values in itertools.product(_DATA_TYPES, _TEST_PARAM_VALUES)
]
class RMSPropOptimizerTest(test.TestCase):
def _rmsprop_update_numpy(self, var, g, mg, rms, mom, lr, decay, momentum,
epsilon, centered):
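    # NumPy reference for the RMSProp update exercised by these tests:
    #   rms_t = decay * rms + (1 - decay) * g**2
    #   mom_t = momentum * mom + lr * g / sqrt(rms_t + epsilon)
    #   var_t = var - mom_t
    # The centered variant additionally subtracts mg_t**2 inside the sqrt,
    # where mg_t = decay * mg + (1 - decay) * g.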
rms_t = rms * decay + (1 - decay) * g * g
denom_t = rms_t + epsilon
if centered:
mg_t = mg * decay + (1 - decay) * g
denom_t -= mg_t * mg_t
else:
mg_t = mg
mom_t = momentum * mom + lr * g / np.sqrt(denom_t, dtype=denom_t.dtype)
var_t = var - mom_t
return var_t, mg_t, rms_t, mom_t
def _sparse_rmsprop_update_numpy(self, var, gindexs, gvalues, mg, rms, mom,
lr, decay, momentum, epsilon, centered):
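    # Same update as _rmsprop_update_numpy, applied only at the indices
    # touched by the sparse gradient; all other entries keep their values.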
mg_t = copy.deepcopy(mg)
rms_t = copy.deepcopy(rms)
mom_t = copy.deepcopy(mom)
var_t = copy.deepcopy(var)
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
rms_t[gindex] = rms[gindex] * decay + (1 - decay) * gvalue * gvalue
denom_t = rms_t[gindex] + epsilon
if centered:
mg_t[gindex] = mg_t[gindex] * decay + (1 - decay) * gvalue
denom_t -= mg_t[gindex] * mg_t[gindex]
mom_t[gindex] = momentum * mom[gindex] + lr * gvalue / np.sqrt(denom_t)
var_t[gindex] = var[gindex] - mom_t[gindex]
return var_t, mg_t, rms_t, mom_t
@test_util.run_deprecated_v1
def testDense(self):
# TODO(yori): Use ParameterizedTest when available
for (dtype, learning_rate, decay, momentum,
epsilon, centered, use_resource) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.2], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.2], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=decay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 4 steps of RMSProp
for _ in range(1, 5):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._rmsprop_update_numpy(
var0_np, grads0_np, mg0_np, rms0_np, mom0_np, learning_rate,
decay, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._rmsprop_update_numpy(
var1_np, grads1_np, mg1_np, rms1_np, mom1_np, learning_rate,
decay, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=0.0,
centered=False).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariableCentered(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
loss = pred * pred
sgd_op = rmsprop.RMSPropOptimizer(
learning_rate=1.0,
decay=0.0,
momentum=0.0,
epsilon=1.0,
centered=True).minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
@test_util.run_deprecated_v1
def testSparse(self):
# TODO(yori): Use ParameterizedTest when available
for (dtype, learning_rate, decay,
momentum, epsilon, centered, _) in _TESTPARAMS:
with test_util.use_gpu():
# Initialize variables for numpy implementation.
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([1]))
grads1_np_indices = np.array([1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([1]))
opt = rmsprop.RMSPropOptimizer(
learning_rate=learning_rate,
decay=decay,
momentum=momentum,
epsilon=epsilon,
centered=centered)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
mg0 = opt.get_slot(var0, "mg")
self.assertEqual(mg0 is not None, centered)
mg1 = opt.get_slot(var1, "mg")
self.assertEqual(mg1 is not None, centered)
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
mg0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mg1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
rms0_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
rms1_np = np.array([1.0, 1.0], dtype=dtype.as_numpy_dtype)
mom0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 4 steps of RMSProp
for _ in range(1, 5):
self.evaluate(update)
var0_np, mg0_np, rms0_np, mom0_np = self._sparse_rmsprop_update_numpy(
var0_np, grads0_np_indices, grads0_np, mg0_np, rms0_np, mom0_np,
learning_rate, decay, momentum, epsilon, centered)
var1_np, mg1_np, rms1_np, mom1_np = self._sparse_rmsprop_update_numpy(
var1_np, grads1_np_indices, grads1_np, mg1_np, rms1_np, mom1_np,
learning_rate, decay, momentum, epsilon, centered)
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testWithoutMomentum(self):
for dtype in [dtypes.half, dtypes.float32]:
with test_util.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
opt = rmsprop.RMSPropOptimizer(
learning_rate=2.0, decay=0.9, momentum=0.0, epsilon=1.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
rms0 = opt.get_slot(var0, "rms")
self.assertTrue(rms0 is not None)
rms1 = opt.get_slot(var1, "rms")
self.assertTrue(rms1 is not None)
mom0 = opt.get_slot(var0, "momentum")
self.assertTrue(mom0 is not None)
mom1 = opt.get_slot(var1, "momentum")
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
self.evaluate(update)
# Check the root mean square accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901, 0.901]), self.evaluate(rms0))
self.assertAllCloseAccordingToType(
np.array([0.90001, 0.90001]), self.evaluate(rms1))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
self.evaluate(update)
# Check the rms accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]),
self.evaluate(rms0))
self.assertAllCloseAccordingToType(
np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]),
self.evaluate(rms1))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
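  # --- Illustrative sketch only (hypothetical helper, not called by any
  # test): the dense update the assertions above encode. With rms
  # initialized to 1.0, decay=0.9, lr=2.0, epsilon=1.0 and grad=0.1, this
  # reproduces the 0.901 accumulator and the parameter deltas in step 1. ---
  def _dense_rmsprop_sketch_numpy(self, var, grad, rms, lr, decay, epsilon):
    # rms_t = decay * rms_{t-1} + (1 - decay) * g^2
    rms = decay * rms + (1.0 - decay) * grad * grad
    # var_t = var_{t-1} - lr * g / sqrt(rms_t + epsilon)
    var = var - lr * grad / np.sqrt(rms + epsilon)
    return var, rms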
@test_util.run_deprecated_v1
def testWithMomentum(self):
for dtype in [dtypes.half, dtypes.float32]:
with test_util.use_gpu():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
opt = rmsprop.RMSPropOptimizer(
learning_rate=2.0, decay=0.9, momentum=0.5, epsilon=1e-5)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
        rms0 = opt.get_slot(var0, "rms")
        self.assertIsNotNone(rms0)
        rms1 = opt.get_slot(var1, "rms")
        self.assertIsNotNone(rms1)
        mom0 = opt.get_slot(var0, "momentum")
        self.assertIsNotNone(mom0)
        mom1 = opt.get_slot(var1, "momentum")
        self.assertIsNotNone(mom1)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: rms = 1, mom = 0. So we should see a normal
# update: v -= grad * learning_rate
self.evaluate(update)
# Check the root mean square accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901, 0.901]), self.evaluate(rms0))
self.assertAllCloseAccordingToType(
np.array([0.90001, 0.90001]), self.evaluate(rms1))
# Check the momentum accumulators
self.assertAllCloseAccordingToType(
np.array([(0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
(0.1 * 2.0 / math.sqrt(0.901 + 1e-5))]),
self.evaluate(mom0))
self.assertAllCloseAccordingToType(
np.array([(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
(0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))]),
self.evaluate(mom1))
        # Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
self.evaluate(update)
# Check the rms accumulators.
self.assertAllCloseAccordingToType(
np.array([0.901 * 0.9 + 0.001, 0.901 * 0.9 + 0.001]),
self.evaluate(rms0))
self.assertAllCloseAccordingToType(
np.array([0.90001 * 0.9 + 1e-5, 0.90001 * 0.9 + 1e-5]),
self.evaluate(rms1))
self.assertAllCloseAccordingToType(
np.array([
0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)),
0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))
]), self.evaluate(mom0))
self.assertAllCloseAccordingToType(
np.array([
0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)),
0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))
]), self.evaluate(mom1))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
(0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5))),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) -
(0.5 * (0.1 * 2.0 / math.sqrt(0.901 + 1e-5)) +
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1e-5)))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
(0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5))),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) -
(0.5 * (0.01 * 2.0 / math.sqrt(0.90001 + 1e-5)) +
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 2e-5)))
]), self.evaluate(var1))
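  # --- Illustrative sketch only (hypothetical helper, not called by any
  # test): the momentum variant exercised above. The momentum slot
  # accumulates scaled gradients and the variable moves by the slot value,
  # matching the step-2 expressions checked against mom0/mom1. ---
  def _momentum_rmsprop_sketch_numpy(self, var, grad, rms, mom, lr, decay,
                                     momentum, epsilon):
    rms = decay * rms + (1.0 - decay) * grad * grad
    # mom_t = momentum * mom_{t-1} + lr * g / sqrt(rms_t + epsilon)
    mom = momentum * mom + lr * grad / np.sqrt(rms + epsilon)
    var = var - mom
    return var, rms, mom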
def testCallableParams(self):
with context.eager_mode():
for dtype in [dtypes.half, dtypes.float32]:
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
decay = lambda: 0.9
momentum = lambda: 0.0
epsilon = lambda: 1.0
opt = rmsprop.RMSPropOptimizer(learning_rate, decay, momentum, epsilon)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the rms accumulators were 1. So we should see a normal
# update: v -= grad * learning_rate
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0))
]), self.evaluate(var1))
# Step 2: the root mean square accumulators contain the previous update.
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check the parameters.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0)),
2.0 - (0.1 * 2.0 / math.sqrt(0.901 + 1.0)) -
(0.1 * 2.0 / math.sqrt(0.901 * 0.9 + 0.001 + 1.0))
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
3.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0)),
4.0 - (0.01 * 2.0 / math.sqrt(0.90001 + 1.0)) -
(0.01 * 2.0 / math.sqrt(0.90001 * 0.9 + 1e-5 + 1.0))
]), self.evaluate(var1))
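  # Note (illustrative): in eager mode the callables above are re-evaluated
  # on every apply_gradients call, so a decaying schedule can be written as a
  # closure over mutable state, e.g.
  #   step = [0]
  #   learning_rate = lambda: 2.0 * (0.95 ** step[0])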
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/rmsprop_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargeVariableTest(test.TestCase):
# NOTE: This is in a separate file from saver_test.py because the
# large allocations do not play well with TSAN, and cause flaky
# failures.
def testLargeVariable(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
with session.Session("", graph=ops.Graph()) as sess:
      # Declare a variable that is exactly 2GB. Saving it should fail,
      # because the serialized checkpoint also includes header metadata,
      # pushing the tensor slice past the 2GB serialization limit.
with ops.device("/cpu:0"):
var = variables.Variable(
constant_op.constant(
False, shape=[2, 1024, 1024, 1024], dtype=dtypes.bool))
save = saver.Saver(
{
var.op.name: var
}, write_version=saver_pb2.SaverDef.V1)
var.initializer.run()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Tensor slice is too large to serialize"):
save.save(sess, save_path)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/saver_large_variable_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.training.evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.layers import layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
_USE_GLOBAL_STEP = 0
def logistic_classifier(inputs):
return layers.dense(inputs, 1, activation=math_ops.sigmoid)
def local_variable(init_value, name):
return variable_scope.get_variable(
name,
dtype=dtypes.float32,
initializer=init_value,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
class EvaluateOnceTest(test.TestCase):
def setUp(self):
super(EvaluateOnceTest, self).setUp()
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _train_model(self, checkpoint_dir, num_steps):
"""Trains a simple classification model.
    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e. we can expect 100% accuracy).
Args:
checkpoint_dir: The directory where the checkpoint is written to.
num_steps: The number of steps to train for.
"""
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
loss_op = losses.log_loss(labels=tf_labels, predictions=tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = optimizer.minimize(loss_op,
training.get_or_create_global_step())
with monitored_session.MonitoredTrainingSession(
checkpoint_dir=checkpoint_dir,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) as session:
loss = None
while not session.should_stop():
_, loss = session.run([train_op, loss_op])
if num_steps >= 300:
assert loss < .015
def testEvaluatePerfectModel(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_perfect_model_once')
    # Train a model to completion:
    self._train_model(checkpoint_dir, num_steps=300)
    # Run evaluation on the trained checkpoint.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
labels = constant_op.constant(self._labels, dtype=dtypes.float32)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy = metrics_module.Accuracy()
update_op = accuracy.update_state(labels, predictions)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={'accuracy': (accuracy.result(), update_op)},
hooks=[
evaluation._StopAfterNEvalsHook(1),
])
    self.assertGreater(final_ops_values['accuracy'], .99)
def testEvaluateWithFiniteInputs(self):
checkpoint_dir = os.path.join(self.get_temp_dir(),
'evaluate_with_finite_inputs')
    # Train a model to completion:
self._train_model(checkpoint_dir, num_steps=300)
# Run evaluation. Inputs are fed through input producer for one epoch.
all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
single_input, single_label = training.slice_input_producer(
[all_inputs, all_labels], num_epochs=1)
inputs, labels = training.batch([single_input, single_label], batch_size=6,
allow_smaller_final_batch=True)
logits = logistic_classifier(inputs)
predictions = math_ops.round(logits)
accuracy = metrics_module.Accuracy()
update_op = accuracy.update_state(labels, predictions)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=update_op,
final_ops={
'accuracy': (accuracy.result(), update_op),
'eval_steps': evaluation._get_or_create_eval_step()
},
hooks=[
evaluation._StopAfterNEvalsHook(None),
])
    self.assertGreater(final_ops_values['accuracy'], .99)
    # Evaluation runs for 4 iterations: the first two process full batches of
    # 6 inputs each, the 3rd processes the remaining 4 inputs, and the 4th
    # raises an OutOfRangeError, which stops evaluation.
self.assertEqual(final_ops_values['eval_steps'], 4)
def testEvalOpAndFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 5
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
eval_ops = state_ops.assign_add(my_var, 1.0)
final_ops = array_ops.identity(my_var) + final_increment
final_hooks = [evaluation._StopAfterNEvalsHook(num_evals),]
initial_hooks = list(final_hooks)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=final_hooks)
self.assertEqual(final_ops_values['value'], num_evals + final_increment)
self.assertEqual(initial_hooks, final_hooks)
def testMultiEvalStepIncrements(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'eval_ops_and_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
num_evals = 6
my_var = local_variable(0.0, name='MyVar')
# In eval ops, we also increase the eval step one more time.
eval_ops = [state_ops.assign_add(my_var, 1.0),
state_ops.assign_add(
evaluation._get_or_create_eval_step(), 1, use_locking=True)]
expect_eval_update_counts = num_evals // 2
final_ops = array_ops.identity(my_var)
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=eval_ops,
final_ops={'value': final_ops},
hooks=[evaluation._StopAfterNEvalsHook(num_evals),])
self.assertEqual(final_ops_values['value'], expect_eval_update_counts)
def testOnlyFinalOp(self):
checkpoint_dir = os.path.join(self.get_temp_dir(), 'only_final_ops')
# Train a model for a single step to get a checkpoint.
self._train_model(checkpoint_dir, num_steps=1)
checkpoint_path = saver.latest_checkpoint(checkpoint_dir)
# Create the model so we have something to restore.
inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
logistic_classifier(inputs)
final_increment = 9.0
my_var = local_variable(0.0, name='MyVar')
final_ops = array_ops.identity(my_var) + final_increment
final_ops_values = evaluation._evaluate_once(
checkpoint_path=checkpoint_path, final_ops={'value': final_ops})
self.assertEqual(final_ops_values['value'], final_increment)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/evaluation_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
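# Example (illustrative): _GetCheckpointFilename("/tmp/train", None) returns
# "/tmp/train/checkpoint", the default coordinator state file.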
@tf_export(v1=["train.generate_checkpoint_state_proto"])
def generate_checkpoint_state_proto(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Generates a checkpoint state proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
all_model_checkpoint_timestamps: A list of floats, indicating the number of
seconds since the Epoch when each checkpoint was generated.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Returns:
CheckpointState proto with model_checkpoint_path and
all_model_checkpoint_paths updated to either absolute paths or
relative paths to the current save_dir.
Raises:
ValueError: If `all_model_checkpoint_timestamps` was provided but its length
does not match `all_model_checkpoint_paths`.
"""
if all_model_checkpoint_paths is None:
all_model_checkpoint_paths = []
if (not all_model_checkpoint_paths or
all_model_checkpoint_paths[-1] != model_checkpoint_path):
logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
model_checkpoint_path)
all_model_checkpoint_paths.append(model_checkpoint_path)
if (all_model_checkpoint_timestamps
and (len(all_model_checkpoint_timestamps)
!= len(all_model_checkpoint_paths))):
raise ValueError(
("Checkpoint timestamps, if provided, must match checkpoint paths (got "
"paths %s and timestamps %s)")
% (all_model_checkpoint_paths, all_model_checkpoint_timestamps))
  # If save_dir is itself relative, rewrite any relative checkpoint paths so
  # that they are expressed relative to save_dir (they may already contain
  # save_dir as a prefix).
if not os.path.isabs(save_dir):
if not os.path.isabs(model_checkpoint_path):
model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
for i in range(len(all_model_checkpoint_paths)):
p = all_model_checkpoint_paths[i]
if not os.path.isabs(p):
all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
coord_checkpoint_proto = CheckpointState(
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
return coord_checkpoint_proto
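# Example (illustrative; paths are hypothetical): generating state for two
# retained checkpoints under an absolute save_dir --
#   ckpt = generate_checkpoint_state_proto(
#       "/tmp/train", "/tmp/train/model-200",
#       all_model_checkpoint_paths=["/tmp/train/model-100",
#                                   "/tmp/train/model-200"])
#   assert ckpt.model_checkpoint_path == "/tmp/train/model-200"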
@deprecation.deprecated(
date=None,
instructions=("Use `tf.train.CheckpointManager` to manage checkpoints "
"rather than manually editing the Checkpoint proto."))
@tf_export(v1=["train.update_checkpoint_state"])
def update_checkpoint_state(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
update_checkpoint_state_internal(
save_dir=save_dir,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
latest_filename=latest_filename,
save_relative_paths=False,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
def update_checkpoint_state_internal(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
save_relative_paths=False,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
save_relative_paths: If `True`, will write relative paths to the checkpoint
state file.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.contrib.checkpoint.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
# Writes the "checkpoint" file for the coordinator for later restoration.
coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
if save_relative_paths:
if os.path.isabs(model_checkpoint_path):
rel_model_checkpoint_path = os.path.relpath(
model_checkpoint_path, save_dir)
else:
rel_model_checkpoint_path = model_checkpoint_path
rel_all_model_checkpoint_paths = []
for p in all_model_checkpoint_paths:
if os.path.isabs(p):
rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))
else:
rel_all_model_checkpoint_paths.append(p)
ckpt = generate_checkpoint_state_proto(
save_dir,
rel_model_checkpoint_path,
all_model_checkpoint_paths=rel_all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
else:
ckpt = generate_checkpoint_state_proto(
save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
if coord_checkpoint_filename == ckpt.model_checkpoint_path:
raise RuntimeError("Save path '%s' conflicts with path used for "
"checkpoint state. Please use a different save path." %
model_checkpoint_path)
  # Prevent a potential read/write race by writing the state file
  # *atomically*.
file_io.atomic_write_string_to_file(coord_checkpoint_filename,
text_format.MessageToString(ckpt))
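# For reference (illustrative; this is not the implementation of
# file_io.atomic_write_string_to_file): an atomic text write is typically
# "write to a temporary file, then rename", e.g.
#   tmp = coord_checkpoint_filename + ".tmp"
#   with open(tmp, "w") as f:
#     f.write(text_format.MessageToString(ckpt))
#   os.rename(tmp, coord_checkpoint_filename)  # rename is atomic on POSIX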
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
"""Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
latest_filename: Optional name of the checkpoint file. Default to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
Raises:
ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
"""
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
latest_filename)
try:
# Check that the file exists before opening it to avoid
# many lines of errors from colossus in the logs.
if file_io.file_exists(coord_checkpoint_filename):
file_content = file_io.read_file_to_string(
coord_checkpoint_filename)
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
if not ckpt.model_checkpoint_path:
raise ValueError("Invalid checkpoint state loaded from "
+ checkpoint_dir)
# For relative model_checkpoint_path and all_model_checkpoint_paths,
# prepend checkpoint_dir.
if not os.path.isabs(ckpt.model_checkpoint_path):
ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
ckpt.model_checkpoint_path)
for i in range(len(ckpt.all_model_checkpoint_paths)):
p = ckpt.all_model_checkpoint_paths[i]
if not os.path.isabs(p):
ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
except errors.OpError as e:
# It's ok if the file cannot be read
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
except text_format.ParseError as e:
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
return ckpt
def _prefix_to_checkpoint_path(prefix, format_version):
"""Returns the pathname of a checkpoint file, given the checkpoint prefix.
For V1 checkpoint, simply returns the prefix itself (the data file). For V2,
returns the pathname to the index file.
Args:
prefix: a string, the prefix of a checkpoint.
format_version: the checkpoint format version that corresponds to the
prefix.
Returns:
The pathname of a checkpoint file, taking into account the checkpoint
format version.
"""
if format_version == saver_pb2.SaverDef.V2:
return prefix + ".index" # The index file identifies a checkpoint.
return prefix # Just the data file.
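# Example (illustrative): for a V2 prefix "/tmp/model.ckpt-42" this returns
# "/tmp/model.ckpt-42.index"; for V1 it returns the prefix unchanged.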
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
"""Finds the filename of latest saved checkpoint file.
Args:
checkpoint_dir: Directory where the variables were saved.
latest_filename: Optional name for the protocol buffer file that
contains the list of most recent checkpoint filenames.
See the corresponding argument to `Saver.save()`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was found.
"""
# Pick the latest checkpoint based on checkpoint state.
ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
if ckpt and ckpt.model_checkpoint_path:
# Look for either a V2 path or a V1 path, with priority for V2.
v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V2)
v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V1)
if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
v1_path):
return ckpt.model_checkpoint_path
else:
logging.error("Couldn't match files for checkpoint %s",
ckpt.model_checkpoint_path)
return None
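# Example usage (illustrative; names are hypothetical):
#   path = latest_checkpoint("/tmp/train")
#   if path is not None:
#     tf.train.Saver().restore(sess, path)  # restore the newest checkpoint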
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to check for files with this prefix.")
@tf_export(v1=["train.checkpoint_exists"])
def checkpoint_exists(checkpoint_prefix):
"""Checks whether a V1 or V2 checkpoint exists with the specified prefix.
This is the recommended way to check if a checkpoint exists, since it takes
into account the naming difference between V1 and V2 formats.
Args:
checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
Returns:
A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.
"""
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if file_io.get_matching_files(pathname):
return True
elif file_io.get_matching_files(checkpoint_prefix):
return True
else:
return False
@deprecation.deprecated(
date=None,
instructions="Use standard file utilities to get mtimes.")
@tf_export(v1=["train.get_checkpoint_mtimes"])
def get_checkpoint_mtimes(checkpoint_prefixes):
"""Returns the mtimes (modification timestamps) of the checkpoints.
Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files
exist, collect their mtime. Both V2 and V1 checkpoints are considered, in
that priority.
This is the recommended way to get the mtimes, since it takes into account
the naming difference between V1 and V2 formats.
Note: If not all checkpoints exist, the length of the returned mtimes list
will be smaller than the length of `checkpoint_prefixes` list, so mapping
checkpoints to corresponding mtimes will not be possible.
Args:
checkpoint_prefixes: a list of checkpoint paths, typically the results of
`Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
Returns:
    A list of mtimes (seconds since the Epoch) of the found checkpoints.
"""
mtimes = []
def match_maybe_append(pathname):
fnames = file_io.get_matching_files(pathname)
if fnames:
mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
return True
return False
for checkpoint_prefix in checkpoint_prefixes:
# Tries V2's metadata file first.
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if match_maybe_append(pathname):
continue
# Otherwise, tries V1, where the prefix is the complete pathname.
match_maybe_append(checkpoint_prefix)
return mtimes
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to delete files with this prefix.")
@tf_export(v1=["train.remove_checkpoint"])
def remove_checkpoint(checkpoint_prefix,
checkpoint_format_version=saver_pb2.SaverDef.V2,
meta_graph_suffix="meta"):
"""Removes a checkpoint given by `checkpoint_prefix`.
Args:
checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result
of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to
`SaverDef.V2`.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
_delete_file_if_exists(
meta_graph_filename(checkpoint_prefix, meta_graph_suffix))
if checkpoint_format_version == saver_pb2.SaverDef.V2:
# V2 has a metadata file and some data files.
_delete_file_if_exists(checkpoint_prefix + ".index")
_delete_file_if_exists(checkpoint_prefix + ".data-?????-of-?????")
else:
# V1, Legacy. Exact match on the data file.
_delete_file_if_exists(checkpoint_prefix)
def _delete_file_if_exists(filespec):
"""Deletes files matching `filespec`."""
for pathname in file_io.get_matching_files(filespec):
file_io.delete_file(pathname)
def meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
suffixed_filename = ".".join([basename, meta_graph_suffix])
return suffixed_filename
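# Example (illustrative): meta_graph_filename("model.ckpt-123456-?????-of-00005")
# strips the shard suffix and returns "model.ckpt-123456.meta".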
# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?
@tf_export("train.CheckpointManager")
class CheckpointManager(object):
"""Deletes old checkpoints.
Example usage:
```python
import tensorflow as tf
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  manager = tf.train.CheckpointManager(
checkpoint, directory="/tmp/model", max_to_keep=5)
status = checkpoint.restore(manager.latest_checkpoint)
while True:
# train
manager.save()
```
`CheckpointManager` preserves its own state across instantiations (see the
`__init__` documentation for details). Only one should be active in a
particular directory at a time.
"""
def __init__(self,
checkpoint,
directory,
max_to_keep,
keep_checkpoint_every_n_hours=None,
checkpoint_name="ckpt"):
"""Configure a `CheckpointManager` for use in `directory`.
If a `CheckpointManager` was previously used in `directory`, its
state will be restored. This includes the list of managed checkpoints and
the timestamp bookkeeping necessary to support
`keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
will be the same as the previous `CheckpointManager`, including cleaning up
existing checkpoints if appropriate.
Checkpoints are only considered for deletion just after a new checkpoint has
been added. At that point, `max_to_keep` checkpoints will remain in an
"active set". Once a checkpoint is preserved by
`keep_checkpoint_every_n_hours` it will not be deleted by this
`CheckpointManager` or any future `CheckpointManager` instantiated in
`directory` (regardless of the new setting of
`keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
active set may be deleted by this `CheckpointManager` or a future
`CheckpointManager` instantiated in `directory` (subject to its
`max_to_keep` and `keep_checkpoint_every_n_hours` settings).
Args:
checkpoint: The `tf.train.Checkpoint` instance to save and manage
checkpoints for.
directory: The path to a directory in which to write checkpoints. A
special file named "checkpoint" is also written to this directory (in a
human-readable text format) which contains the state of the
`CheckpointManager`.
max_to_keep: An integer, the number of checkpoints to keep. Unless
preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
deleted from the active set, oldest first, until only `max_to_keep`
checkpoints remain. If `None`, no checkpoints are deleted and everything
stays in the active set. Note that `max_to_keep=None` will keep all
checkpoint paths in memory and in the checkpoint state protocol buffer
on disk.
keep_checkpoint_every_n_hours: Upon removal from the active set, a
checkpoint will be preserved if it has been at least
`keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
default setting of `None` does not preserve any checkpoints in this way.
checkpoint_name: Custom name for the checkpoint file.
Raises:
ValueError: If `max_to_keep` is not a positive integer.
"""
self._checkpoint = checkpoint
self._save_counter_assign = None
if max_to_keep is not None and max_to_keep <= 0:
raise ValueError(
("Expected a positive integer or `None` for `max_to_max_to_keep`, "
"got %d.")
% (max_to_keep,))
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._directory = directory
self._checkpoint_prefix = os.path.join(directory, checkpoint_name)
recovered_state = get_checkpoint_state(directory)
current_clock = time.time()
self._maybe_delete = collections.OrderedDict()
if recovered_state is None:
self._latest_checkpoint = None
      # Set the clock back slightly to avoid race conditions when quickly
      # re-creating a CheckpointManager.
self._last_preserved_timestamp = current_clock - 1.
else:
self._latest_checkpoint = recovered_state.model_checkpoint_path
self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
if current_clock < self._last_preserved_timestamp:
# Time seems to have reversed itself. In addition to this warning, we'll
# min() saved checkpoint timestamps with the current time to ensure that
# old checkpoints don't get deleted accidentally.
logging.warning(
("time.time() returned a value %f seconds behind the last "
"preserved checkpoint timestamp.")
% (self._last_preserved_timestamp - current_clock,))
self._last_preserved_timestamp = current_clock
all_timestamps = recovered_state.all_model_checkpoint_timestamps
all_paths = recovered_state.all_model_checkpoint_paths
del recovered_state # Uses modified values from now on
if not all_timestamps:
all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
for filename, timestamp in zip(all_paths, all_timestamps):
timestamp = min(timestamp, current_clock)
if timestamp > self._last_preserved_timestamp:
self._maybe_delete[filename] = timestamp
@property
def latest_checkpoint(self):
"""The prefix of the most recent checkpoint in `directory`.
Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
the constructor argument to `CheckpointManager`.
Suitable for passing to `tf.train.Checkpoint.restore` to resume training.
Returns:
The checkpoint prefix. If there are no checkpoints, returns `None`.
"""
return self._latest_checkpoint
@property
def checkpoints(self):
"""A list of managed checkpoints.
Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
show up in this list (to avoid ever-growing filename lists).
Returns:
A list of filenames, sorted from oldest to newest.
"""
return list(self._maybe_delete.keys())
def _sweep(self):
"""Deletes or preserves managed checkpoints."""
if not self._max_to_keep:
# Does not update self._last_preserved_timestamp, since everything is kept
# in the active set.
return
while len(self._maybe_delete) > self._max_to_keep:
filename, timestamp = self._maybe_delete.popitem(last=False)
# Even if we're keeping this checkpoint due to
# keep_checkpoint_every_n_hours, we won't reference it to avoid
# infinitely-growing CheckpointState protos.
if (self._keep_checkpoint_every_n_hours
and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
>= self._last_preserved_timestamp)):
self._last_preserved_timestamp = timestamp
continue
_delete_file_if_exists(filename + ".index")
_delete_file_if_exists(filename + ".data-?????-of-?????")
def _record_state(self):
"""Saves the `CheckpointManager`'s state in `directory`."""
filenames, timestamps = zip(*self._maybe_delete.items())
update_checkpoint_state_internal(
self._directory,
model_checkpoint_path=self.latest_checkpoint,
all_model_checkpoint_paths=filenames,
all_model_checkpoint_timestamps=timestamps,
last_preserved_timestamp=self._last_preserved_timestamp,
save_relative_paths=True)
@property
def _prefix(self):
"""A common prefix for all checkpoints saved with this manager.
For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
`prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
checkpoint has several associated files
(e.g. `"/tmp/tf-model/ckpt-2.index"`).
Returns:
A string prefix.
"""
return self._checkpoint_prefix
def save(self, checkpoint_number=None):
"""Creates a new checkpoint and manages it.
Args:
checkpoint_number: An optional integer, or an integer-dtype `Variable` or
`Tensor`, used to number the checkpoint. If `None` (default),
checkpoints are numbered using `checkpoint.save_counter`. Even if
`checkpoint_number` is provided, `save_counter` is still incremented. A
user-provided `checkpoint_number` is not incremented even if it is a
`Variable`.
Returns:
The path to the new checkpoint. It is also recorded in the `checkpoints`
and `latest_checkpoint` properties.
"""
# Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
# slightly with a custom numbering option.
if context.executing_eagerly():
save_counter = self._checkpoint.save_counter
save_counter.assign_add(1)
session = None
else:
session = ops.get_default_session()
def _initializing_creator(next_creator, **kwargs):
"""Initialize the save counter if it has been newly created."""
v = next_creator(**kwargs)
session.run(v.initializer)
return v
with variable_scope.variable_creator_scope(_initializing_creator):
save_counter = self._checkpoint.save_counter
if self._save_counter_assign is None:
self._save_counter_assign = save_counter.assign_add(1, read_value=False)
session.run(self._save_counter_assign)
if checkpoint_number is None:
checkpoint_number = save_counter
if not isinstance(checkpoint_number, compat.integral_types):
checkpoint_number = training_util.global_step(
sess=session, global_step_tensor=checkpoint_number)
prefix = "%s-%d" % (self._prefix, checkpoint_number)
save_path = self._checkpoint.write(prefix)
timestamp = time.time()
# If this is an overwritten checkpoint we were previously tracking, delete
# and reinsert it to make sure it goes to the end of the queue.
if save_path in self._maybe_delete:
del self._maybe_delete[save_path]
self._maybe_delete[save_path] = timestamp
self._latest_checkpoint = save_path
# Before deleting anything we update the Checkpoint proto with the new
# checkpoint. We'll go back and correct it after cleaning up old files, but
# a preemption while deleting will be more likely to see the new checkpoint
# this way.
self._record_state()
self._sweep()
# Write out the Checkpoint proto a second time, now without the deleted
# checkpoints.
self._record_state()
return save_path
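  # Example (illustrative; names are hypothetical): numbering checkpoints by
  # a training step instead of save_counter --
  #   manager = CheckpointManager(ckpt, directory="/tmp/model", max_to_keep=3)
  #   for step in range(1000):
  #     ...  # train
  #     if step % 100 == 0:
  #       manager.save(checkpoint_number=step)  # writes ".../ckpt-<step>"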
|
tensorflow-master
|
tensorflow/python/training/checkpoint_management.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad Dual Averaging for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdagradDAOptimizer"])
class AdagradDAOptimizer(optimizer.Optimizer):
"""Adagrad Dual Averaging algorithm for sparse linear models.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
  This optimizer lazily regularizes features that are absent from a given
  mini-batch: when such a feature is next seen, it is updated with a closed
  form rule that is equivalent to having updated it on every intervening
  mini-batch.
AdagradDA is typically used when there is a need for large sparsity in the
trained model. This optimizer only guarantees sparsity for linear models. Be
careful when using AdagradDA for deep networks as it will require careful
initialization of the gradient accumulators for it to train.
"""
def __init__(self,
learning_rate,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="AdagradDA"):
"""Construct a new AdagradDA optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
global_step: A `Tensor` containing the current training step number.
initial_gradient_squared_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AdagradDA".
Raises:
ValueError: If the `initial_gradient_squared_accumulator_value` is
invalid.
"""
if initial_gradient_squared_accumulator_value <= 0.0:
raise ValueError("initial_gradient_squared_accumulator_value must be "
"positive: %s" %
initial_gradient_squared_accumulator_value)
super(AdagradDAOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_gradient_squared_accumulator_value = (
initial_gradient_squared_accumulator_value)
# Created in Initialize.
self._learning_rate_tensor = None
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._global_step = global_step
self._global_step_on_worker = None
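  # Example usage (illustrative; names are hypothetical):
  #   global_step = training_util.get_or_create_global_step()
  #   opt = AdagradDAOptimizer(0.01, global_step,
  #                            l1_regularization_strength=0.001)
  #   train_op = opt.minimize(loss, global_step=global_step)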
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
g_val = constant_op.constant(
0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
gg_val = constant_op.constant(
self._initial_gradient_squared_accumulator_value,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, g_val, "gradient_accumulator", self._name)
self._get_or_make_slot(v, gg_val, "gradient_squared_accumulator",
self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
    # Performance optimization: each worker keeps its own copy of the global
    # step to avoid overloading the parameter server that holds it.
with ops.colocate_with(self._learning_rate_tensor):
self._global_step_on_worker = array_ops.identity(self._global_step) + 1
def _apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.apply_adagrad_da(
var,
g_acc,
gg_acc,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.resource_apply_adagrad_da(
var.handle,
g_acc.handle,
gg_acc.handle,
grad,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, grad.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, grad.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.sparse_apply_adagrad_da(
var,
g_acc,
gg_acc,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
with ops.device(var.device):
global_step = array_ops.identity(self._global_step_on_worker)
return training_ops.resource_sparse_apply_adagrad_da(
var.handle,
g_acc.handle,
gg_acc.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength, grad.dtype),
math_ops.cast(self._l2_regularization_strength, grad.dtype),
global_step,
use_locking=self._use_locking)
|
tensorflow-master
|
tensorflow/python/training/adagrad_da.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import shutil
import tempfile
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_module
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.training.tracking import util
class LatestCheckpointWithRelativePaths(test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
@test_util.run_deprecated_v1
def testNameCollision(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
# Collides with the default name of the checkpoint state file.
filepath = os.path.join(traindir, "checkpoint")
with self.cached_session() as sess:
unused_a = variables.Variable(0.0) # So that Saver saves something.
variables.global_variables_initializer().run()
# Should fail.
saver = saver_module.Saver(sharded=False)
with self.assertRaisesRegexp(ValueError, "collides with"):
saver.save(sess, filepath)
# Succeeds: the file will be named "checkpoint-<step>".
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(
checkpoint_management.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath)
self.assertIsNotNone(
checkpoint_management.latest_checkpoint(traindir))
# Succeeds: the file will be named "checkpoint-<step>-<i>-of-<n>".
saver = saver_module.Saver(sharded=True)
saver.save(sess, filepath, global_step=1)
self.assertIsNotNone(
checkpoint_management.latest_checkpoint(traindir))
@test_util.run_deprecated_v1
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.cached_session() as sess:
# Build a simple graph.
v0 = variables.Variable(0.0)
inc = v0.assign_add(1.0)
save = saver_module.Saver({"v0": v0})
# Record a short training history.
variables.global_variables_initializer().run()
save.save(sess, filepath, global_step=0)
self.evaluate(inc)
save.save(sess, filepath, global_step=1)
self.evaluate(inc)
save.save(sess, filepath, global_step=2)
with self.cached_session() as sess:
# Build a new graph with different initialization.
v0 = variables.Variable(-1.0)
# Create a new saver.
save = saver_module.Saver({"v0": v0})
variables.global_variables_initializer().run()
# Get the most recent checkpoint name from the training history file.
name = checkpoint_management.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(test.TestCase):
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._get_test_dir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = checkpoint_management.generate_checkpoint_state_proto(
save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = checkpoint_management.generate_checkpoint_state_proto(
train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._get_test_dir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = checkpoint_management.generate_checkpoint_state_proto(
save_dir, abs_path, all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = os.path.join("train", "model-2")
checkpoint_management.update_checkpoint_state(
train_dir, rel_path, all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = checkpoint_management.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
def testUpdateCheckpointStateSaveRelativePaths(self):
save_dir = self._get_test_dir("update_checkpoint_state")
os.chdir(save_dir)
abs_path2 = os.path.join(save_dir, "model-2")
rel_path2 = "model-2"
abs_path0 = os.path.join(save_dir, "model-0")
rel_path0 = "model-0"
checkpoint_management.update_checkpoint_state_internal(
save_dir=save_dir,
model_checkpoint_path=abs_path2,
all_model_checkpoint_paths=[rel_path0, abs_path2],
save_relative_paths=True)
# File should contain relative paths.
file_content = file_io.read_file_to_string(
os.path.join(save_dir, "checkpoint"))
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
self.assertEqual(ckpt.model_checkpoint_path, rel_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], rel_path0)
# get_checkpoint_state should return absolute paths.
ckpt = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path, abs_path2)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path2)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path0)
def testCheckPointStateFailsWhenIncomplete(self):
save_dir = self._get_test_dir("checkpoint_state_fails_when_incomplete")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("")
ckpt_file.close()
with self.assertRaises(ValueError):
checkpoint_management.get_checkpoint_state(save_dir)
def testCheckPointCompletesRelativePaths(self):
save_dir = self._get_test_dir("checkpoint_completes_relative_paths")
os.chdir(save_dir)
ckpt_path = os.path.join(save_dir, "checkpoint")
ckpt_file = open(ckpt_path, "w")
ckpt_file.write("""
model_checkpoint_path: "./model.ckpt-687529"
all_model_checkpoint_paths: "./model.ckpt-687500"
all_model_checkpoint_paths: "./model.ckpt-687529"
""")
ckpt_file.close()
ckpt = checkpoint_management.get_checkpoint_state(save_dir)
self.assertEqual(ckpt.model_checkpoint_path,
os.path.join(save_dir, "./model.ckpt-687529"))
self.assertEqual(ckpt.all_model_checkpoint_paths[0],
os.path.join(save_dir, "./model.ckpt-687500"))
self.assertEqual(ckpt.all_model_checkpoint_paths[1],
os.path.join(save_dir, "./model.ckpt-687529"))
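# A minimal sketch (not part of checkpoint_management) of the completion
# rule the assertions above exercise: non-absolute paths recorded in the
# CheckpointState proto are resolved against the checkpoint directory.
def _complete_path_sketch(save_dir, ckpt_path):
  if os.path.isabs(ckpt_path):
    return ckpt_path
  return os.path.join(save_dir, ckpt_path)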
class SaverUtilsTest(test.TestCase):
def setUp(self):
self._base_dir = os.path.join(self.get_temp_dir(), "saver_utils_test")
gfile.MakeDirs(self._base_dir)
def tearDown(self):
gfile.DeleteRecursively(self._base_dir)
@test_util.run_deprecated_v1
def testCheckpointExists(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
self.assertFalse(
checkpoint_management.checkpoint_exists(path)) # Not saved yet.
ckpt_prefix = saver.save(sess, path)
self.assertTrue(checkpoint_management.checkpoint_exists(ckpt_prefix))
ckpt_prefix = checkpoint_management.latest_checkpoint(self._base_dir)
self.assertTrue(checkpoint_management.checkpoint_exists(ckpt_prefix))
@test_util.run_deprecated_v1
def testGetCheckpointMtimes(self):
prefixes = []
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(write_version=version)
prefixes.append(
saver.save(sess, os.path.join(self._base_dir, str(version))))
mtimes = checkpoint_management.get_checkpoint_mtimes(prefixes)
self.assertEqual(2, len(mtimes))
self.assertTrue(mtimes[1] >= mtimes[0])
@test_util.run_deprecated_v1
def testRemoveCheckpoint(self):
for sharded in (False, True):
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
with self.session(graph=ops_lib.Graph()) as sess:
unused_v = variables.Variable(1.0, name="v")
variables.global_variables_initializer().run()
saver = saver_module.Saver(sharded=sharded, write_version=version)
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
ckpt_prefix = saver.save(sess, path)
self.assertTrue(checkpoint_management.checkpoint_exists(ckpt_prefix))
checkpoint_management.remove_checkpoint(ckpt_prefix, version)
self.assertFalse(checkpoint_management.checkpoint_exists(ckpt_prefix))
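# A hedged note on the two formats exercised above: a V2 checkpoint is a
# path prefix naming an ".index" file plus ".data-?????-of-?????" shards,
# while a V1 checkpoint is a single file (or per-shard files when sharded).
# That is why checkpoint_exists and remove_checkpoint take the prefix
# returned by save() rather than a literal filename.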
class CheckpointManagerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testDeletion(self):
checkpoint = util.Checkpoint()
manager = checkpoint_management.CheckpointManager(
checkpoint, self.get_temp_dir(), max_to_keep=3)
first_path = manager.save()
second_path = manager.save()
third_path = manager.save()
fourth_path = manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertFalse(checkpoint_management.checkpoint_exists(first_path))
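# A minimal sketch of the max_to_keep window testDeletion relies on (an
# illustration, not CheckpointManager's actual bookkeeping): each save
# appends a path and the oldest is dropped once the window overflows,
# matching first_path being deleted after the fourth save above.
def _retention_sketch(paths, max_to_keep):
  kept = []
  for path in paths:
    kept.append(path)
    if max_to_keep is not None and len(kept) > max_to_keep:
      kept.pop(0)  # oldest checkpoint is deleted from disk
  return kept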
@test_util.run_in_graph_and_eager_modes
def testKeepAll(self):
checkpoint = util.Checkpoint()
directory = os.path.join(
self.get_temp_dir(),
# Avoid sharing directories between eager and graph
# TODO(allenl): stop run_in_graph_and_eager_modes reusing directories
str(context.executing_eagerly()))
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=None)
first_path = manager.save()
second_path = manager.save()
third_path = manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertTrue(checkpoint_management.checkpoint_exists(first_path))
self.assertEqual(third_path, manager.latest_checkpoint)
self.assertEqual([first_path, second_path, third_path],
manager.checkpoints)
del manager
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=None)
fourth_path = manager.save()
self.assertEqual([first_path, second_path, third_path, fourth_path],
manager.checkpoints)
del manager
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=3)
self.assertEqual([first_path, second_path, third_path, fourth_path],
manager.checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertTrue(checkpoint_management.checkpoint_exists(first_path))
fifth_path = manager.save()
self.assertEqual([third_path, fourth_path, fifth_path],
manager.checkpoints)
self.assertTrue(checkpoint_management.checkpoint_exists(fifth_path))
self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertFalse(checkpoint_management.checkpoint_exists(second_path))
self.assertFalse(checkpoint_management.checkpoint_exists(first_path))
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(checkpoint_management, "time")
def testSaveRestoreState(self, mock_time):
directory = self.get_temp_dir()
mock_time.time.return_value = 3.
checkpoint = util.Checkpoint()
first_manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=2)
first_time = 10000.
first_name = os.path.join(directory, "ckpt-1")
mock_time.time.return_value = first_time
first_manager.save()
state = checkpoint_management.get_checkpoint_state(directory)
second_time = first_time + 3610.
second_name = os.path.join(directory, "ckpt-2")
mock_time.time.return_value = second_time
first_manager.save()
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual([first_time, second_time],
state.all_model_checkpoint_timestamps)
self.assertEqual([first_name, second_name], first_manager.checkpoints)
self.assertEqual(second_name, first_manager.latest_checkpoint)
del first_manager
second_manager = checkpoint_management.CheckpointManager(
checkpoint, directory,
max_to_keep=2, keep_checkpoint_every_n_hours=1.5)
self.assertEqual([first_name, second_name], second_manager.checkpoints)
self.assertEqual(second_name, second_manager.latest_checkpoint)
third_name = os.path.join(directory, "ckpt-3")
third_time = second_time + 3600. * 0.2
mock_time.time.return_value = third_time
second_manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(first_name))
self.assertTrue(checkpoint_management.checkpoint_exists(second_name))
self.assertEqual([second_name, third_name],
second_manager.checkpoints)
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual(first_time, state.last_preserved_timestamp)
fourth_time = third_time + 3600. * 0.5
mock_time.time.return_value = fourth_time
fourth_name = os.path.join(directory, "ckpt-4")
second_manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(first_name))
self.assertFalse(checkpoint_management.checkpoint_exists(second_name))
self.assertEqual([third_name, fourth_name],
second_manager.checkpoints)
fifth_time = fourth_time + 3600. * 0.5
mock_time.time.return_value = fifth_time
fifth_name = os.path.join(directory, "ckpt-5")
second_manager.save()
self.assertEqual([fourth_name, fifth_name],
second_manager.checkpoints)
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual(first_time, state.last_preserved_timestamp)
del second_manager
third_manager = checkpoint_management.CheckpointManager(
checkpoint, directory,
max_to_keep=2, keep_checkpoint_every_n_hours=1.5)
self.assertEqual(fifth_name, third_manager.latest_checkpoint)
mock_time.time.return_value += 10.
third_manager.save()
sixth_name = os.path.join(directory, "ckpt-6")
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual(fourth_time, state.last_preserved_timestamp)
self.assertTrue(checkpoint_management.checkpoint_exists(first_name))
self.assertTrue(checkpoint_management.checkpoint_exists(fourth_name))
self.assertTrue(checkpoint_management.checkpoint_exists(fifth_name))
self.assertTrue(checkpoint_management.checkpoint_exists(sixth_name))
self.assertFalse(checkpoint_management.checkpoint_exists(second_name))
self.assertFalse(checkpoint_management.checkpoint_exists(third_name))
self.assertEqual([fifth_name, sixth_name],
third_manager.checkpoints)
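# A minimal sketch of the keep_checkpoint_every_n_hours rule driving the
# timestamps checked above (an illustration; the function and its names
# are assumptions, not CheckpointManager internals): a checkpoint aging
# out of the max_to_keep window is preserved rather than deleted when it
# is at least n hours newer than the last preserved timestamp.
def _maybe_preserve_sketch(last_preserved, candidate, every_n_hours):
  if candidate - last_preserved >= every_n_hours * 3600.0:
    return candidate  # preserved on disk; the marker advances
  return last_preserved  # deleted; the marker stays put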
@test_util.run_in_graph_and_eager_modes
def testContinueFromUnmanaged(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "unusual_prefix")
checkpoint = util.Checkpoint()
first_path = checkpoint.save(prefix)
second_path = checkpoint.save(prefix)
del checkpoint
checkpoint = util.Checkpoint()
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=2)
checkpoint.restore(manager.latest_checkpoint).run_restore_ops()
self.assertEqual(2, self.evaluate(checkpoint.save_counter))
third_path = manager.save()
self.assertEqual([third_path], manager.checkpoints)
fourth_path = manager.save()
self.assertEqual([third_path, fourth_path],
manager.checkpoints)
fifth_path = manager.save()
self.assertEqual([fourth_path, fifth_path],
manager.checkpoints)
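# The manager only deletes checkpoints it wrote itself: the two unmanaged
# saves under "unusual_prefix" survive below, while the manager's own
# oldest save (third_path) is gone once max_to_keep=2 is exceeded.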
self.assertTrue(checkpoint_management.checkpoint_exists(first_path))
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertFalse(checkpoint_management.checkpoint_exists(third_path))
self.assertTrue(checkpoint_management.checkpoint_exists(fourth_path))
self.assertTrue(checkpoint_management.checkpoint_exists(fifth_path))
@test_util.run_in_graph_and_eager_modes
@test.mock.patch.object(checkpoint_management, "time")
def testClockReset(self, mock_time):
directory = self.get_temp_dir()
mock_time.time.return_value = 10000.
checkpoint = util.Checkpoint()
first_manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=1, keep_checkpoint_every_n_hours=1.)
first_path = first_manager.save()
mock_time.time.return_value += 3600.
second_path = first_manager.save()
mock_time.time.return_value += 3600.
third_path = first_manager.save()
self.assertFalse(checkpoint_management.checkpoint_exists(first_path))
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertEqual([third_path], first_manager.checkpoints)
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual(13600., state.last_preserved_timestamp)
# Set the clock back in time.
mock_time.time.return_value = 5000.
del first_manager
with test.mock.patch.object(logging, "warning") as mock_log:
second_manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=1)
self.assertRegexpMatches(
str(mock_log.call_args),
"behind the last preserved checkpoint timestamp")
# We should err on the side of keeping checkpoints around when we're not
# sure whether they were preserved or not due to clock funkiness.
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
# We know about the existing checkpoints, but they'll never be deleted and
# so won't go in the CheckpointState proto on save.
self.assertEqual(third_path, second_manager.latest_checkpoint)
self.assertEqual([], second_manager.checkpoints)
mock_time.time.return_value += 10.
fourth_path = second_manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertEqual(fourth_path, second_manager.latest_checkpoint)
self.assertEqual([fourth_path], second_manager.checkpoints)
mock_time.time.return_value += 10.
fifth_path = second_manager.save()
self.assertTrue(checkpoint_management.checkpoint_exists(second_path))
self.assertTrue(checkpoint_management.checkpoint_exists(third_path))
self.assertEqual([fifth_path], second_manager.checkpoints)
state = checkpoint_management.get_checkpoint_state(directory)
self.assertEqual(5000., state.last_preserved_timestamp)
self.assertEqual([5020.],
state.all_model_checkpoint_timestamps)
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self):
directory = self.get_temp_dir()
step = variables.Variable(0, dtype=dtypes.int64)
checkpoint = util.Checkpoint(step=step)
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=2)
self.evaluate(step.initializer)
for i in range(5):
path = manager.save(checkpoint_number=step)
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
self.assertEqual(5, self.evaluate(checkpoint.save_counter))
# Test regular integers
last_path = manager.save(checkpoint_number=32)
self.assertIn("-32", last_path)
self.assertEqual(last_path, manager.latest_checkpoint)
self.assertEqual(
last_path, checkpoint_management.latest_checkpoint(directory))
state = checkpoint_management.get_checkpoint_state(directory)
# Only the most recent two checkpoints are saved
self.assertEqual([path, last_path], state.all_model_checkpoint_paths)
@test_util.run_in_graph_and_eager_modes
def testCustomCheckpointPrefix(self):
directory = self.get_temp_dir()
checkpoint = util.Checkpoint()
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=2, checkpoint_name="ckpt_name")
path = manager.save(checkpoint_number=5)
self.assertEqual(os.path.basename(path), "ckpt_name-5")
manager = checkpoint_management.CheckpointManager(
checkpoint, directory, max_to_keep=2)
path = manager.save(checkpoint_number=5)
self.assertEqual(os.path.basename(path), "ckpt-5")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/checkpoint_management_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training_util
@test_util.run_v1_only('b/120545219')
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEqual('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEqual(expected_dtype, global_step.dtype.base_dtype)
self.assertEqual([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.Variable(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step, g)
def test_create_global_step(self):
self.assertIsNone(training_util.get_global_step())
with ops.Graph().as_default() as g:
global_step = training_util.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step, g)
self._assert_global_step(training_util.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
training_util.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
training_util.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
self._assert_global_step(training_util.get_or_create_global_step())
self._assert_global_step(training_util.get_or_create_global_step(g))
@test_util.run_v1_only('b/120545219')
class GlobalStepReadTest(test.TestCase):
def test_global_step_read_is_none_if_there_is_no_global_step(self):
with ops.Graph().as_default():
self.assertIsNone(training_util._get_or_create_global_step_read())
training_util.create_global_step()
self.assertIsNotNone(training_util._get_or_create_global_step_read())
def test_reads_from_cache(self):
with ops.Graph().as_default():
training_util.create_global_step()
first = training_util._get_or_create_global_step_read()
second = training_util._get_or_create_global_step_read()
self.assertEqual(first, second)
def test_reads_before_increments(self):
with ops.Graph().as_default():
training_util.create_global_step()
read_tensor = training_util._get_or_create_global_step_read()
inc_op = training_util._increment_global_step(1)
inc_three_op = training_util._increment_global_step(3)
with monitored_session.MonitoredTrainingSession() as sess:
read_value, _ = sess.run([read_tensor, inc_op])
self.assertEqual(0, read_value)
read_value, _ = sess.run([read_tensor, inc_three_op])
self.assertEqual(1, read_value)
read_value = sess.run(read_tensor)
self.assertEqual(4, read_value)
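# The cached read tensor observes the global step value from before any
# increment fetched in the same session.run call, which is why the runs
# above see 0, then 1, and only the final standalone read sees 4.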
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/training_util_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Momentum for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.MomentumOptimizer"])
class MomentumOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Momentum algorithm.
Computes (if `use_nesterov = False`):
```
accumulation = momentum * accumulation + gradient
variable -= learning_rate * accumulation
```
Note that in the dense version of this algorithm, `accumulation` is updated
and applied regardless of a gradient's value, whereas the sparse version (when
the gradient is an `IndexedSlices`, typically because of `tf.gather` or an
embedding) only updates variable slices and corresponding `accumulation` terms
when that part of the variable was used in the forward pass.
"""
def __init__(self, learning_rate, momentum,
use_locking=False, name="Momentum", use_nesterov=False):
"""Construct a new Momentum optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
use_nesterov: If `True` use Nesterov Momentum.
See [Sutskever et al., 2013](
http://jmlr.org/proceedings/papers/v28/sutskever13.pdf).
This implementation always computes gradients at the value of the
variable(s) passed to the optimizer. Using Nesterov Momentum makes the
variable(s) track the values called `theta_t + mu*v_t` in the paper.
This implementation is an approximation of the original formula, valid
for high values of momentum. It will compute the "adjusted gradient"
in NAG by assuming that the new gradient will be estimated by the
current average gradient plus the product of momentum and the change
in the average gradient.
@compatibility(eager)
When eager execution is enabled, `learning_rate` and `momentum` can each be
a callable that takes no arguments and returns the actual value to use. This
can be useful for changing these values across different invocations of
optimizer functions.
@end_compatibility
"""
super(MomentumOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._momentum = momentum
self._use_nesterov = use_nesterov
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "momentum", self._name)
def _prepare(self):
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate()
self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
name="learning_rate")
momentum = self._momentum
if callable(momentum):
momentum = momentum()
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
def _apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_dense(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.resource_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
grad,
math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
def _apply_sparse(self, grad, var):
mom = self.get_slot(var, "momentum")
return training_ops.sparse_apply_momentum(
var, mom,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
grad.values, grad.indices,
math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov).op
def _resource_apply_sparse(self, grad, var, indices):
mom = self.get_slot(var, "momentum")
return training_ops.resource_sparse_apply_momentum(
var.handle, mom.handle,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
grad, indices,
math_ops.cast(self._momentum_tensor, grad.dtype),
use_locking=self._use_locking,
use_nesterov=self._use_nesterov)
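# A minimal, framework-free sketch of the update rule documented on
# MomentumOptimizer above (illustrative only: the toy quadratic loss and
# every name in this sketch are assumptions, not part of the API).
def _momentum_update_sketch(learning_rate=0.1, momentum=0.9, steps=3,
                            use_nesterov=False):
  w = 5.0             # parameter being optimized
  accumulation = 0.0  # momentum slot, zero-initialized like _zeros_slot
  for _ in range(steps):
    gradient = 2.0 * w  # d/dw of the toy loss w ** 2
    accumulation = momentum * accumulation + gradient
    if use_nesterov:
      # Nesterov variant: step with the current gradient plus momentum
      # times the updated accumulation, per the approximation above.
      w -= learning_rate * (gradient + momentum * accumulation)
    else:
      w -= learning_rate * accumulation
  return w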
|
tensorflow-master
|
tensorflow/python/training/momentum.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.GrpcServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class MultipleContainersTest(test.TestCase):
# Verifies behavior of tf.Session.reset() with multiple containers using
# tf.container.
# TODO(b/34465411): Starting multiple servers with different configurations
# in the same test is flaky. Move this test case back into
# "server_lib_test.py" when this is no longer the case.
@test_util.run_deprecated_v1
def testMultipleContainers(self):
with ops.container("test0"):
v0 = variables.Variable(1.0, name="v0")
with ops.container("test1"):
v1 = variables.Variable(2.0, name="v0")
server = server_lib.Server.create_local_server()
sess = session.Session(server.target)
sess.run(variables.global_variables_initializer())
self.assertAllEqual(1.0, sess.run(v0))
self.assertAllEqual(2.0, sess.run(v1))
# Resets container. Session aborts.
session.Session.reset(server.target, ["test0"])
with self.assertRaises(errors_impl.AbortedError):
sess.run(v1)
# Connects to the same target. Device memory for v0 will have been
# released, so it will be uninitialized, but v1 should still be valid.
sess = session.Session(server.target)
with self.assertRaises(errors_impl.FailedPreconditionError):
sess.run(v0)
self.assertAllEqual(2.0, sess.run(v1))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/server_lib_multiple_containers_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for supervisor.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import shutil
import time
import uuid
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.summary import summary_iterator
from tensorflow.python.summary.writer import writer
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import supervisor
def _summary_iterator(test_dir):
"""Reads events from test_dir/events.
Args:
test_dir: Name of the test directory.
Returns:
A summary_iterator
"""
event_paths = sorted(glob.glob(os.path.join(test_dir, "event*")))
return summary_iterator.summary_iterator(event_paths[-1])
class SupervisorTest(test.TestCase):
def _test_dir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
return test_dir
def _wait_for_glob(self, pattern, timeout_secs, for_checkpoint=True):
"""Wait for a checkpoint file to appear.
Args:
pattern: A string.
timeout_secs: How long to wait for in seconds.
for_checkpoint: whether we're globbing for checkpoints.
"""
end_time = time.time() + timeout_secs
while time.time() < end_time:
if for_checkpoint:
if checkpoint_management.checkpoint_exists(pattern):
return
else:
if len(gfile.Glob(pattern)) >= 1:
return
time.sleep(0.05)
self.fail("Glob never matched any file: %s" % pattern)
# This test does not test much.
def testBasics(self):
logdir = self._test_dir("basics")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
for _ in xrange(10):
self.evaluate(my_op)
sess.close()
sv.stop()
def testManagedSession(self):
logdir = self._test_dir("managed_session")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for _ in xrange(10):
self.evaluate(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
def testManagedSessionUserError(self):
logdir = self._test_dir("managed_user_error")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with self.assertRaisesRegexp(RuntimeError, "failing here"):
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 1:
raise RuntimeError("failing here")
else:
self.evaluate(my_op)
# Supervisor has been stopped.
self.assertTrue(sv.should_stop())
self.assertEqual(1, last_step)
def testManagedSessionIgnoreOutOfRangeError(self):
logdir = self._test_dir("managed_out_of_range")
with ops.Graph().as_default():
my_op = constant_op.constant(1.0)
sv = supervisor.Supervisor(logdir=logdir)
last_step = None
with sv.managed_session("") as sess:
for step in xrange(10):
last_step = step
if step == 3:
raise errors_impl.OutOfRangeError(my_op.op.node_def, my_op.op,
"all done")
else:
self.evaluate(my_op)
# Supervisor has been stopped. OutOfRangeError was not thrown.
self.assertTrue(sv.should_stop())
self.assertEqual(3, last_step)
def testManagedSessionDoNotKeepSummaryWriter(self):
logdir = self._test_dir("managed_not_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_op=None)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Sleep 1.2s to make sure that the next event file has a different name
# than the current one; event file names embed a wall-clock timestamp
# with one-second resolution.
time.sleep(1.2)
with sv.managed_session(
"", close_summary_writer=True, start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
event_paths = sorted(glob.glob(os.path.join(logdir, "event*")))
self.assertEquals(2, len(event_paths))
# The two event files should have the same contents.
for path in event_paths:
# The summary iterator should report the summary once as we closed the
# summary writer across the 2 sessions.
rr = summary_iterator.summary_iterator(path)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph and metagraph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
# But only once.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
with self.assertRaises(StopIteration):
next(rr)
def testManagedSessionKeepSummaryWriter(self):
logdir = self._test_dir("managed_keep_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
with sv.managed_session(
"", close_summary_writer=False,
start_standard_services=False) as sess:
sv.summary_computed(sess, sess.run(summ))
# Now close the summary writer to flush the events.
sv.summary_writer.close()
# The summary iterator should report the summary twice as we reused
# the same summary writer across the 2 sessions.
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
self.assertTrue(ev.graph_def)
ev = next(rr)
self.assertTrue(ev.meta_graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should also have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def _csv_data(self, logdir):
# Create a small data file with 3 CSV records.
data_path = os.path.join(logdir, "data.csv")
with open(data_path, "w") as f:
f.write("1,2,3\n")
f.write("4,5,6\n")
f.write("7,8,9\n")
return data_path
def testManagedEndOfInputOneQueue(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from a single queue.
logdir = self._test_dir("managed_end_of_input_one_queue")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(rec)
def testManagedEndOfInputTwoQueues(self):
# Tests that the supervisor finishes without an error when using
# a fixed number of epochs, reading from two queues, the second
# one producing a batch from the first one.
logdir = self._test_dir("managed_end_of_input_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
while not sv.should_stop():
sess.run(shuff_rec)
def testManagedMainErrorTwoQueues(self):
# Tests that the supervisor correctly raises a main loop
# error even when using multiple queues for input.
logdir = self._test_dir("managed_main_error_two_queues")
os.makedirs(logdir)
data_path = self._csv_data(logdir)
with self.assertRaisesRegexp(RuntimeError, "fail at step 3"):
with ops.Graph().as_default():
# Create an input pipeline that reads the file 3 times.
filename_queue = input_lib.string_input_producer(
[data_path], num_epochs=3)
reader = io_ops.TextLineReader()
_, csv = reader.read(filename_queue)
rec = parsing_ops.decode_csv(csv, record_defaults=[[1], [1], [1]])
shuff_rec = input_lib.shuffle_batch(rec, 1, 6, 4)
sv = supervisor.Supervisor(logdir=logdir)
with sv.managed_session("") as sess:
for step in range(9):
if sv.should_stop():
break
elif step == 3:
raise RuntimeError("fail at step 3")
else:
sess.run(shuff_rec)
def testSessionConfig(self):
logdir = self._test_dir("session_config")
with ops.Graph().as_default():
with ops.device("/cpu:1"):
my_op = constant_op.constant([1.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session(
"", config=config_pb2.ConfigProto(device_count={"CPU": 2}))
for _ in xrange(10):
self.evaluate(my_op)
sess.close()
sv.stop()
def testChiefCanWriteEvents(self):
logdir = self._test_dir("can_write")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(is_chief=True, logdir=logdir, summary_op=None)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNonChiefCannotWriteEvents(self):
def _summary_computed():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summ = summary.merge_all()
sv.summary_computed(sess, sess.run(summ))
def _start_standard_services():
with ops.Graph().as_default():
sv = supervisor.Supervisor(is_chief=False)
sess = sv.prepare_or_wait_for_session("")
sv.start_standard_services(sess)
self.assertRaises(RuntimeError, _summary_computed)
self.assertRaises(RuntimeError, _start_standard_services)
def testNoLogdirButWantSummary(self):
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
@test_util.run_v1_only("b/120545219")
def testLogdirButExplicitlyNoSummaryWriter(self):
logdir = self._test_dir("explicit_no_summary_writer")
with ops.Graph().as_default():
variables.VariableV1([1.0], name="foo")
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sv = supervisor.Supervisor(logdir=logdir, summary_writer=None)
sess = sv.prepare_or_wait_for_session("")
# Check that a checkpoint is still generated.
self._wait_for_glob(sv.save_path, 3.0)
# Check that we cannot write a summary
with self.assertRaisesRegexp(RuntimeError, "requires a summary writer"):
sv.summary_computed(sess, sess.run(summ))
def testNoLogdirButExplicitSummaryWriter(self):
logdir = self._test_dir("explicit_summary_writer")
with ops.Graph().as_default():
summary.scalar("c1", constant_op.constant(1))
summary.scalar("c2", constant_op.constant(2))
summary.scalar("c3", constant_op.constant(3))
summ = summary.merge_all()
sw = writer.FileWriter(logdir)
sv = supervisor.Supervisor(logdir="", summary_op=None, summary_writer=sw)
meta_graph_def = meta_graph.create_meta_graph_def()
sess = sv.prepare_or_wait_for_session("")
sv.summary_computed(sess, sess.run(summ))
sess.close()
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# Check the summary was written to 'logdir'
rr = _summary_iterator(logdir)
# The first event should list the file_version.
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
# The next one has the graph.
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
# The next one should have the values from the summary.
ev = next(rr)
self.assertProtoEquals("""
value { tag: 'c1' simple_value: 1.0 }
value { tag: 'c2' simple_value: 2.0 }
value { tag: 'c3' simple_value: 3.0 }
""", ev.summary)
# The next one should be a stop message if we closed cleanly.
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
# We should be done.
self.assertRaises(StopIteration, lambda: next(rr))
def testNoLogdirSucceeds(self):
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir="", summary_op=None)
sess = sv.prepare_or_wait_for_session("")
sess.close()
sv.stop()
def testUseSessionManager(self):
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0])
sm = session_manager_lib.SessionManager()
# Pass in session_manager. The additional init_op is ignored.
sv = supervisor.Supervisor(logdir="", session_manager=sm)
sv.prepare_or_wait_for_session("")
@test_util.run_v1_only("b/120545219")
def testInitOp(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0])
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("b/120545219")
def testInitFn(self):
logdir = self._test_dir("default_init_op")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0])
def _init_fn(sess):
sess.run(v.initializer)
sv = supervisor.Supervisor(logdir=logdir, init_op=None, init_fn=_init_fn)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("b/120545219")
def testInitOpWithFeedDict(self):
logdir = self._test_dir("feed_dict_init_op")
with ops.Graph().as_default():
p = array_ops.placeholder(dtypes.float32, shape=(3,))
v = variables.VariableV1(p, name="v")
sv = supervisor.Supervisor(
logdir=logdir,
init_op=variables.global_variables_initializer(),
init_feed_dict={p: [1.0, 2.0, 3.0]})
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
@test_util.run_v1_only("b/120545219")
def testReadyForLocalInitOp(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_ready_for_local_init_op")
uid = uuid.uuid4().hex
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.VariableV1(
1, name="default_ready_for_local_init_op_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="default_ready_for_local_init_op_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
init_op=v.initializer,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(1, sess0.run(w0))
self.assertEqual(2, sess1.run(vadd1))
self.assertEqual(1, sess1.run(w1))
self.assertEqual(2, sess0.run(v0))
sv0.stop()
sv1.stop()
@test_util.run_v1_only("b/120545219")
def testReadyForLocalInitOpRestoreFromCheckpoint(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("ready_for_local_init_op_restore")
uid = uuid.uuid4().hex
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1(
10.0, name="ready_for_local_init_op_restore_v_" + str(uid))
summary.scalar("ready_for_local_init_op_restore_v_" + str(uid), v)
sv = supervisor.Supervisor(logdir=logdir)
sv.prepare_or_wait_for_session(server.target)
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
def get_session(is_chief):
g = ops.Graph()
with g.as_default():
with ops.device("/job:local"):
v = variables.VariableV1(
1.0, name="ready_for_local_init_op_restore_v_" + str(uid))
vadd = v.assign_add(1)
w = variables.VariableV1(
v,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name="ready_for_local_init_op_restore_w_" + str(uid))
ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
sv = supervisor.Supervisor(
logdir=logdir,
is_chief=is_chief,
graph=g,
recovery_wait_secs=1,
ready_for_local_init_op=ready_for_local_init_op)
sess = sv.prepare_or_wait_for_session(server.target)
return sv, sess, v, vadd, w
sv0, sess0, v0, _, w0 = get_session(True)
sv1, sess1, _, vadd1, w1 = get_session(False)
self.assertEqual(10, sess0.run(w0))
self.assertEqual(11, sess1.run(vadd1))
self.assertEqual(10, sess1.run(w1))
self.assertEqual(11, sess0.run(v0))
sv0.stop()
sv1.stop()
def testLocalInitOp(self):
logdir = self._test_dir("default_local_init_op")
with ops.Graph().as_default():
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# An entity which is initialized through a TABLE_INITIALIZER.
w = variables.VariableV1([4, 5, 6], trainable=False, collections=[])
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, w.initializer)
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
self.assertAllClose([4, 5, 6], sess.run(w))
sv.stop()
def testLocalInitOpForNonChief(self):
logdir = self._test_dir("default_local_init_op_non_chief")
with ops.Graph().as_default():
with ops.device("/job:localhost"):
# A local variable.
v = variables.VariableV1(
[1.0, 2.0, 3.0],
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# This shouldn't add a variable to the VARIABLES collection responsible
# for variables that are saved/restored from checkpoints.
self.assertEquals(len(variables.global_variables()), 0)
# Suppress normal variable inits to make sure the local one is
# initialized via local_init_op.
sv = supervisor.Supervisor(logdir=logdir, init_op=None, is_chief=False)
sess = sv.prepare_or_wait_for_session("")
self.assertAllClose([1.0, 2.0, 3.0], sess.run(v))
sv.stop()
def testInitOpFails(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails")
with ops.Graph().as_default():
v = variables.VariableV1([1.0, 2.0, 3.0], name="v")
variables.VariableV1([4.0, 5.0, 6.0], name="w")
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
def testInitOpFailsForTransientVariable(self):
server = server_lib.Server.create_local_server()
logdir = self._test_dir("default_init_op_fails_for_local_variable")
with ops.Graph().as_default():
v = variables.VariableV1(
[1.0, 2.0, 3.0],
name="v",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
variables.VariableV1(
[1.0, 2.0, 3.0],
name="w",
collections=[ops.GraphKeys.LOCAL_VARIABLES])
# w will not be initialized.
sv = supervisor.Supervisor(logdir=logdir, local_init_op=v.initializer)
with self.assertRaisesRegexp(RuntimeError,
"Variables not initialized: w"):
sv.prepare_or_wait_for_session(server.target)
@test_util.run_v1_only("b/120545219")
def testSetupFail(self):
logdir = self._test_dir("setup_fail")
with ops.Graph().as_default():
variables.VariableV1([1.0, 2.0, 3.0], name="v")
with self.assertRaisesRegexp(ValueError, "must have their device set"):
supervisor.Supervisor(logdir=logdir, is_chief=False)
with ops.Graph().as_default(), ops.device("/job:ps"):
variables.VariableV1([1.0, 2.0, 3.0], name="v")
supervisor.Supervisor(logdir=logdir, is_chief=False)
@test_util.run_v1_only("b/120545219")
def testDefaultGlobalStep(self):
logdir = self._test_dir("default_global_step")
with ops.Graph().as_default():
variables.VariableV1(287, name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
self.assertEquals(287, sess.run(sv.global_step))
sv.stop()
@test_util.run_v1_only("b/120545219")
def testRestoreFromMetaGraph(self):
logdir = self._test_dir("restore_from_meta_graph")
with ops.Graph().as_default():
variables.VariableV1(1, name="v0")
sv = supervisor.Supervisor(logdir=logdir)
sess = sv.prepare_or_wait_for_session("")
filename = sv.saver.save(sess, sv.save_path)
sv.stop()
# Create a new Graph and Supervisor and recover.
with ops.Graph().as_default():
new_saver = saver_lib.import_meta_graph(".".join([filename, "meta"]))
self.assertIsNotNone(new_saver)
sv2 = supervisor.Supervisor(logdir=logdir, saver=new_saver)
sess = sv2.prepare_or_wait_for_session("")
self.assertEquals(1, sess.run("v0:0"))
sv2.saver.save(sess, sv2.save_path)
sv2.stop()
# This test is based on the fact that the standard services start
# right away and get to run once before sv.stop() returns.
# We still sleep a bit to make the test robust.
@test_util.run_v1_only("b/120545219")
def testStandardServicesWithoutGlobalStep(self):
logdir = self._test_dir("standard_services_without_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([1.0], name="foo")
summary.scalar("v", v[0])
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
save_path = sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
# Stored MetaGraphDef
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
self.assertProtoEquals("value { tag: 'v' simple_value: 1.0 }", ev.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([10.10], name="foo")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(1.0, self.evaluate(v)[0])
# Same as testStandardServicesWithoutGlobalStep but with a global step.
# We should get a summary about the step time.
@test_util.run_v1_only("b/120545219")
def testStandardServicesWithGlobalStep(self):
logdir = self._test_dir("standard_services_with_global_step")
# Create a checkpoint.
with ops.Graph().as_default():
v = variables.VariableV1([123], name="global_step")
sv = supervisor.Supervisor(logdir=logdir)
meta_graph_def = meta_graph.create_meta_graph_def(
saver_def=sv.saver.saver_def)
sess = sv.prepare_or_wait_for_session("")
# This is where the checkpoint will appear, with step number 123.
save_path = "%s-123" % sv.save_path
self._wait_for_glob(save_path, 3.0)
self._wait_for_glob(
os.path.join(logdir, "*events*"), 3.0, for_checkpoint=False)
# Wait to make sure everything is written to file before stopping.
time.sleep(1)
sv.stop()
# There should be an event file with a version number.
rr = _summary_iterator(logdir)
ev = next(rr)
self.assertEquals("brain.Event:2", ev.file_version)
ev = next(rr)
ev_graph = graph_pb2.GraphDef()
ev_graph.ParseFromString(ev.graph_def)
self.assertProtoEquals(sess.graph.as_graph_def(add_shapes=True), ev_graph)
ev = next(rr)
ev_meta_graph = meta_graph_pb2.MetaGraphDef()
ev_meta_graph.ParseFromString(ev.meta_graph_def)
self.assertProtoEquals(meta_graph_def, ev_meta_graph)
self.assertProtoEquals(
sess.graph.as_graph_def(add_shapes=True), ev_meta_graph.graph_def)
ev = next(rr)
# It is actually nondeterministic whether SessionLog.START gets written
# before the summary or the checkpoint, but this works when run 10000 times.
self.assertEquals(123, ev.step)
self.assertEquals(event_pb2.SessionLog.START, ev.session_log.status)
first = next(rr)
second = next(rr)
# It is nondeterministic whether the value gets written before the checkpoint
# since they are on separate threads, so we check for both conditions.
if first.HasField("summary"):
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", first.summary)
self.assertEquals(123, second.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
second.session_log.status)
else:
self.assertEquals(123, first.step)
self.assertEquals(event_pb2.SessionLog.CHECKPOINT,
first.session_log.status)
self.assertProtoEquals("""value { tag: 'global_step/sec'
simple_value: 0.0 }""", second.summary)
ev = next(rr)
self.assertEquals(event_pb2.SessionLog.STOP, ev.session_log.status)
self.assertRaises(StopIteration, lambda: next(rr))
# There should be a checkpoint file with the variable "foo"
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([-12], name="global_step")
sav = saver_lib.Saver([v])
sav.restore(sess, save_path)
self.assertEqual(123, self.evaluate(v)[0])
def testNoQueueRunners(self):
with ops.Graph().as_default(), self.cached_session() as sess:
sv = supervisor.Supervisor(logdir=self._test_dir("no_queue_runners"))
self.assertEqual(0, len(sv.start_queue_runners(sess)))
sv.stop()
def testPrepareSessionAfterStopForChief(self):
logdir = self._test_dir("prepare_after_stop_chief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=True)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
def testPrepareSessionAfterStopForNonChief(self):
logdir = self._test_dir("prepare_after_stop_nonchief")
with ops.Graph().as_default():
sv = supervisor.Supervisor(logdir=logdir, is_chief=False)
# Create a first session and then stop.
sess = sv.prepare_or_wait_for_session("")
sv.stop()
sess.close()
self.assertTrue(sv.should_stop())
# Now create a second session and test that we don't stay stopped until
# we ask to stop again.
sess2 = sv.prepare_or_wait_for_session("")
self.assertFalse(sv.should_stop())
sv.stop()
sess2.close()
self.assertTrue(sv.should_stop())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/supervisor_test.py
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
"""Scaffold tests."""
def test_nothing_created_before_finalize(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
self.assertEqual(None, scaffold.init_op)
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertEqual(None, scaffold.ready_op)
self.assertEqual(None, scaffold.ready_for_local_init_op)
self.assertEqual(None, scaffold.local_init_op)
self.assertEqual(None, scaffold.saver)
def test_defaults_empty_graph(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
variables.VariableV1(1, name='my_var')
variables.VariableV1(
2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
with self.cached_session() as sess:
self.assertItemsEqual([b'my_var', b'my_local_var'],
sess.run(scaffold.ready_op))
self.assertItemsEqual([b'my_var'],
sess.run(scaffold.ready_for_local_init_op))
sess.run(scaffold.init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
sess.run(scaffold.local_init_op)
self.assertEqual(0, len(sess.run(scaffold.ready_op)))
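# Note (sketch): ready_op and ready_for_local_init_op report the names of
# still-uninitialized variables, so both drain to an empty list once
# init_op and local_init_op have run.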
def test_defaults_no_variables(self):
with ops.Graph().as_default():
scaffold = monitored_session.Scaffold()
constant_op.constant(1, name='my_const')
scaffold.finalize()
self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
self.assertEqual(None, scaffold.init_feed_dict)
self.assertEqual(None, scaffold.init_fn)
self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
def test_caches_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
scaffold1 = monitored_session.Scaffold()
scaffold1.finalize()
scaffold2 = monitored_session.Scaffold()
scaffold2.finalize()
self.assertEqual(scaffold1.init_op, scaffold2.init_op)
self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
self.assertEqual(scaffold1.ready_for_local_init_op,
scaffold2.ready_for_local_init_op)
self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
self.assertEqual(scaffold1.saver, scaffold2.saver)
def test_raise_error_if_more_than_one_cached_item(self):
with ops.Graph().as_default():
variables.VariableV1([1])
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
monitored_session.Scaffold().finalize()
def test_uses_passed_values(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver)
scaffold.finalize()
self.assertEqual(2, scaffold.init_op)
self.assertEqual(3, scaffold.init_feed_dict)
self.assertTrue(callable(scaffold.init_fn))
self.assertEqual(5, scaffold.ready_op)
self.assertEqual(6, scaffold.ready_for_local_init_op)
self.assertEqual(7, scaffold.local_init_op)
self.assertEqual(saver, scaffold.saver)
def test_graph_is_finalized(self):
with ops.Graph().as_default():
variables.VariableV1([1])
monitored_session.Scaffold().finalize()
with self.assertRaisesRegexp(RuntimeError,
'Graph is finalized and cannot be modified'):
constant_op.constant([0])
def test_new_scaffold_from_default_scaffold(self):
scaffold1 = monitored_session.Scaffold()
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold2 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(2, scaffold2.init_op)
self.assertEqual(3, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(5, scaffold2.ready_op)
self.assertEqual(6, scaffold2.ready_for_local_init_op)
self.assertEqual(7, scaffold2.local_init_op)
self.assertEqual(saver, scaffold2.saver)
def test_new_scaffold_from_existing_scaffold(self):
with ops.Graph().as_default():
variables.VariableV1([1])
saver = saver_lib.Saver()
scaffold1 = monitored_session.Scaffold(
init_op=2,
init_feed_dict=3,
init_fn=lambda scaffold, sess: 4,
ready_op=5,
ready_for_local_init_op=6,
local_init_op=7,
saver=saver)
scaffold2 = monitored_session.Scaffold(
init_op=4,
init_feed_dict=6,
init_fn=lambda scaffold, sess: 8,
ready_op=10,
ready_for_local_init_op=12,
local_init_op=14,
saver=saver,
copy_from_scaffold=scaffold1)
scaffold2.finalize()
self.assertEqual(4, scaffold2.init_op)
self.assertEqual(6, scaffold2.init_feed_dict)
self.assertTrue(callable(scaffold2.init_fn))
self.assertEqual(10, scaffold2.ready_op)
self.assertEqual(12, scaffold2.ready_for_local_init_op)
self.assertEqual(14, scaffold2.local_init_op)
self.assertEqual(saver, scaffold2.saver)
def test_copy_from_scaffold_is_scaffold(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(
TypeError, 'copy_from_scaffold is not a Scaffold instance'):
monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
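# A minimal usage sketch (illustrative only; the names below are
# hypothetical): _test_dir guarantees an existing directory emptied of any
# files left over from a previous run of the same test.
def _test_dir_sketch(tmp_root):
  demo_dir = _test_dir(tmp_root, 'demo_test')
  assert os.path.isdir(demo_dir)
  assert not glob.glob('%s/*' % demo_dir)
  return demo_dir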
class FakeHook(session_run_hook.SessionRunHook):
def __init__(self):
self.should_stop = False
self.request = None
self.call_counter = collections.Counter()
self.last_run_context = None
self.last_run_values = None
def begin(self):
self.call_counter['begin'] += 1
def after_create_session(self, session, coord): # pylint: disable=unused-argument
self.call_counter['after_create_session'] += 1
def before_run(self, run_context):
self.call_counter['before_run'] += 1
self.last_run_context = run_context
return self.request
def after_run(self, run_context, run_values):
self.call_counter['after_run'] += 1
self.last_run_values = run_values
if self.should_stop:
run_context.request_stop()
def end(self, session):
self.call_counter['end'] += 1
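# A minimal sketch (illustrative only) of the SessionRunHook lifecycle that
# FakeHook records: begin() fires once during session construction,
# after_create_session() once per created session, before_run()/after_run()
# bracket every run() call, and end() fires on a clean close.
def _fake_hook_lifecycle_sketch():
  hook = FakeHook()
  with ops.Graph().as_default():
    c = constant_op.constant(1)
    with monitored_session.MonitoredSession(hooks=[hook]) as session:
      session.run(c)
  assert hook.call_counter['begin'] == 1
  assert hook.call_counter['after_create_session'] == 1
  assert hook.call_counter['before_run'] == 1
  assert hook.call_counter['after_run'] == 1
  assert hook.call_counter['end'] == 1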
class MonitoredTrainingSessionTest(test.TestCase):
"""Tests MonitoredTrainingSession."""
def test_saving_restoring_checkpoint(self):
logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(2, session.run(gstep))
def test_save_checkpoint_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_save_checkpoint_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_checkpoint_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(10):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(11, session.run(gstep))
def test_summaries_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summaries_secs(self):
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
save_summaries_steps=None,
save_summaries_secs=0.1,
log_step_count_steps=10) as session:
session.run(new_gstep)
time.sleep(0.2)
for _ in range(101):
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_custom_saving(self):
logdir = _test_dir(self.get_temp_dir(), 'test_custom_saving')
fake_hook = FakeHook()
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
with monitored_session.MonitoredTrainingSession(
is_chief=True,
checkpoint_dir=logdir,
chief_only_hooks=[fake_hook],
save_checkpoint_secs=0) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# Check whether custom hook called or not
self.assertEqual(1, fake_hook.call_counter['begin'])
# A restart will not find the checkpoint, since we didn't save.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(0, session.run(gstep))
class MockExtended(object):
def __init__(self, between_graph, should_init, should_checkpoint,
should_save_summary):
self.experimental_between_graph = between_graph
self.experimental_should_init = should_init
self.should_checkpoint = should_checkpoint
self.should_save_summary = should_save_summary
class MockStrategy(object):
def __init__(self,
between_graph=False,
should_init=True,
should_checkpoint=None,
should_save_summary=None):
self.extended = MockExtended(between_graph, should_init, should_checkpoint,
should_save_summary)
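# A minimal sketch (illustrative only): wrapping a MockStrategy in a
# _WorkerContext is how the tests below toggle summary and checkpoint
# saving without running a real distribution strategy; entering the
# returned context makes MonitoredTrainingSession consult the strategy's
# should_save_summary / should_checkpoint flags.
def _mock_worker_context_sketch():
  return distribute_coordinator._WorkerContext(
      MockStrategy(should_checkpoint=False, should_save_summary=False),
      None, None, None)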
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
"""Test distribute coordinator controls summary saving and checkpointing."""
def test_summary_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
summaries = util_test.latest_summaries(logdir)
tags = [s.summary.value[0].tag for s in summaries]
self.assertIn('my_summary_tag', tags)
self.assertIn('global_step/sec', tags)
def test_summary_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_save_summary=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
summary.scalar('my_summary_tag', new_gstep * 2)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_summaries_steps=100,
log_step_count_steps=10) as session:
for _ in range(101):
session.run(new_gstep)
# No summary is saved.
summaries = util_test.latest_summaries(logdir)
self.assertEqual(len(summaries), 0)
def test_checkpoint_hook_enabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=True), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredTrainingSession(
is_chief=True, checkpoint_dir=logdir) as session:
self.assertEqual(100, session.run(gstep))
def test_checkpoint_hook_disabled(self):
context = distribute_coordinator._WorkerContext(
MockStrategy(should_checkpoint=False), None, None, None)
logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
strategy.extended._is_chief = False
context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
logdir = _test_dir(self.get_temp_dir(),
                   'test_checkpoint_hook_enable_on_non_chief')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
new_gstep = state_ops.assign_add(gstep, 1)
with context, monitored_session.MonitoredTrainingSession(
checkpoint_dir=logdir,
save_checkpoint_steps=100,
log_step_count_steps=10) as session:
for _ in range(100):
session.run(new_gstep)
# No checkpoint is saved.
checkpoint = checkpoint_management.latest_checkpoint(logdir)
self.assertIsNone(checkpoint)
# But saved to a temporary directory.
checkpoint = checkpoint_management.latest_checkpoint(
os.path.join(logdir, 'tmp_worker_1'))
self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
"""A wrapped session that stops at the N-th call to _check_stop."""
def __init__(self, sess, n):
super(StopAtNSession, self).__init__(sess)
self._count = n
def _check_stop(self):
if self._count == 0:
return True
self._count -= 1
return False
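# A minimal sketch (illustrative only): should_stop() delegates to
# _check_stop(), so a StopAtNSession answers False for its first n calls
# and True afterwards.
def _stop_at_n_sketch(raw_sess):
  wrapped = StopAtNSession(raw_sess, 2)
  return [wrapped.should_stop() for _ in range(3)]  # [False, False, True]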
class WrappedSessionTest(test.TestCase):
"""_WrappedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEquals(sess.graph, wrapped_sess.graph)
self.assertEquals(sess.sess_str, wrapped_sess.sess_str)
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertFalse(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_uses_check_stop(self):
with self.cached_session() as sess:
wrapped_sess = StopAtNSession(sess, 3)
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertFalse(wrapped_sess.should_stop())
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_delegates_to_wrapped_session(self):
with self.cached_session() as sess:
wrapped_sess0 = StopAtNSession(sess, 4)
wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertFalse(wrapped_sess1.should_stop())
self.assertTrue(wrapped_sess1.should_stop())
@test_util.run_deprecated_v1
def test_close_twice(self):
with self.cached_session() as sess:
wrapped_sess = monitored_session._WrappedSession(sess)
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
wrapped_sess.close()
self.assertTrue(wrapped_sess.should_stop())
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
wrapped_sess = monitored_session._WrappedSession(sess)
self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
while not coord.should_stop():
time.sleep(0.001)
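# A minimal sketch (illustrative only) of the Coordinator protocol used
# throughout these tests: worker threads poll coord.should_stop(), and
# request_stop() flips that flag so join() returns promptly.
def _coordinator_sketch():
  coord = coordinator.Coordinator()
  worker = threading.Thread(target=busy_wait_for_coord_stop, args=(coord,))
  coord.register_thread(worker)
  worker.start()
  coord.request_stop()
  coord.join()  # returns once the worker observes should_stop()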
class CoordinatedSessionTest(test.TestCase):
"""_CoordinatedSession tests."""
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEquals(sess.graph, coord_sess.graph)
self.assertEquals(sess.sess_str, coord_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
@test_util.run_deprecated_v1
def test_should_stop_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord_sess.close()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_should_stop_on_coord_stop(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
coord.request_stop()
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_dont_request_stop_on_exception_in_main_thread(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
self.assertEqual(0, coord_sess.run(c))
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
self.assertFalse(coord.should_stop())
self.assertFalse(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_stop_threads_on_close_after_exception(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
self.assertFalse(coord_sess.should_stop())
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(0, coord_sess.run(c))
for t in threads:
self.assertTrue(t.is_alive())
self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
for t in threads:
self.assertTrue(t.is_alive())
with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
coord_sess.run([None], feed_dict={c: 2})
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
def test_stop_threads_on_close(self):
with self.cached_session() as sess:
coord = coordinator.Coordinator()
threads = [
threading.Thread(
target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
]
for t in threads:
coord.register_thread(t)
t.start()
coord_sess = monitored_session._CoordinatedSession(sess, coord)
coord_sess.close()
for t in threads:
self.assertFalse(t.is_alive())
self.assertTrue(coord.should_stop())
self.assertTrue(coord_sess.should_stop())
@test_util.run_deprecated_v1
def test_propagates_exception_trace(self):
assertion = control_flow_ops.Assert(False, ['This should fail.'])
with self.cached_session() as sess:
coord = coordinator.Coordinator(clean_stop_exception_types=())
coord_sess = monitored_session._CoordinatedSession(sess, coord)
try:
coord_sess.run([assertion])
self.fail('No exception was raised by assertion.')
except errors_impl.InvalidArgumentError:
# Extract the name of the file where the exception was first raised.
_, _, exc_traceback = sys.exc_info()
tb = traceback.extract_tb(exc_traceback)
exc_source_file = tb[-1][0]
exc_source_basename = os.path.basename(exc_source_file)
# If it's monitored_session.py then the original stack trace was not
# correctly propagated.
self.assertIn(
exc_source_basename, ['session.py', 'monitored_session.py'],
'The exception was raised from an unrecognized file. This unit '
'test probably needs to be updated. Traceback:\n%s\n' % tb)
self.assertEqual(
exc_source_basename, 'session.py',
'Original stack trace was not propagated by MonitoredSession. '
'Traceback:\n%s' % tb)
class AbortAtNSession(object):
"""A mock session that aborts at the N-th run call."""
def __init__(self, sess, n):
self._sess = sess
self._count = n
def close(self):
pass
def run(self, *args, **kwargs):
if self._count == 0:
raise errors_impl.AbortedError('Aborted at N', None, None)
self._count -= 1
return self._sess.run(*args, **kwargs)
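# A minimal sketch (illustrative only): an AbortAtNSession behaves like the
# wrapped session for n run() calls and then raises AbortedError, the
# exception class that _RecoverableSession treats as "discard and recreate".
def _abort_at_n_sketch(raw_sess, fetch):
  aborting = AbortAtNSession(raw_sess, 1)
  result = aborting.run(fetch)  # the first call succeeds
  try:
    aborting.run(fetch)  # the second call aborts
  except errors_impl.AbortedError:
    pass
  return result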
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
"""With this hook Coordinator throws an exception after N-runs."""
def __init__(self, calls_before_stopping, exception_to_raise=None):
self._started_the_side_thread_already = False
self._lock = threading.Lock()
self._stored_exception_event = threading.Event()
self._calls_before_stopping = calls_before_stopping
self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
None, None, 'Aborted at N'))
def _maybe_stop_with_exception(self, coord):
while True:
with self._lock:
if self._calls_before_stopping == 0:
try:
raise self._exception_to_raise
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
self._stored_exception_event.set()
break
def after_create_session(self, session, coord):
if self._started_the_side_thread_already:
return
separate_thread = threading.Thread(
target=self._maybe_stop_with_exception, args=(coord,))
coord.register_thread(separate_thread)
separate_thread.start()
self._started_the_side_thread_already = True
# Coordinator will take care of joining `separate_thread`.
def after_run(self, run_context, run_values):
stopping_now = False
with self._lock:
self._calls_before_stopping -= 1
if self._calls_before_stopping == 0:
stopping_now = True
if stopping_now:
self._stored_exception_event.wait()
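# How the handshake above works (sketch): after_run() decrements the shared
# counter under the lock; once it reaches zero, the side thread raises the
# configured exception, hands it to coord.request_stop(e), and sets the
# event. after_run() blocks on that event, so by the time the hooked run()
# returns, the coordinator already holds the exception that the *next*
# run() call will surface.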
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
"""With this hook training encounters an exception after N-runs."""
def __init__(self, calls_before_stopping):
StopCoordinatorWithException.__init__(self, calls_before_stopping)
self._coord = None
def after_create_session(self, session, coord):
self._coord = coord
return StopCoordinatorWithException.after_create_session(
self, session, coord)
def after_run(self, run_context, run_values):
StopCoordinatorWithException.after_run(self, run_context, run_values)
try:
# After a `run`, an exception could have been stored inside the
# coordinator.
self._coord.raise_requested_exception()
except errors_impl.AbortedError:
# In the real world, the main thread may or may not know about the exception
# that stopped the coordinator. Because the coordinator has stopped, the
# main thread could have gotten stuck as well (for example, the
# coordinator was supposed to execute `FIFOQueue.enqueue` while the main
# thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
# the session is going to get garbage collected after some time with:
raise errors_impl.CancelledError(None, None,
'Session got garbage-collected.')
class CountingSessionCreator(object):
"""A creator that counts the number of created sessions."""
def __init__(self, session):
self._initial_session = session
# We only have one session per test case. We can't re-create it, thus
# it shouldn't be closed.
self._initial_session.close = lambda *args: None
self._create_session_calls = 0
@property
def number_of_sessions_created(self):
return self._create_session_calls
def create_session(self):
self._create_session_calls += 1
return self._initial_session
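# A minimal sketch (illustrative only): because create_session() always
# hands back the same underlying session, number_of_sessions_created is a
# direct count of how many times the monitored session decided to
# "recreate" after a failure.
def _counting_creator_sketch(raw_sess):
  creator = CountingSessionCreator(raw_sess)
  creator.create_session()
  creator.create_session()
  return creator.number_of_sessions_created  # 2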
class RecoverableSessionTest(test.TestCase):
"""_RecoverableSession tests."""
class _SessionReturner(object):
def __init__(self, sess):
self._sess = sess
def create_session(self):
return self._sess
@test_util.run_deprecated_v1
def test_properties(self):
with self.cached_session() as sess:
constant_op.constant(0.0)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEquals(sess.graph, recoverable_sess.graph)
self.assertEquals(sess.sess_str, recoverable_sess.sess_str)
@test_util.run_deprecated_v1
def test_run(self):
with self.cached_session() as sess:
c = constant_op.constant(0)
v = array_ops.identity(c)
recoverable_sess = monitored_session._RecoverableSession(
self._SessionReturner(sess))
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
@test_util.run_deprecated_v1
def test_recovery(self):
with self.cached_session() as sess:
class StackSessionCreator(object):
def __init__(self, sess):
self.sessions_to_use = [
AbortAtNSession(sess, x + 1) for x in range(3)
]
def create_session(self):
return self.sessions_to_use.pop(0)
c = constant_op.constant(0)
v = array_ops.identity(c)
session_creator = StackSessionCreator(sess)
# List of 3 sessions to use for recovery. The first one aborts
# after 1 run() call, the second after 2 run calls, the third
# after 3 run calls.
self.assertEqual(3, len(session_creator.sessions_to_use))
# Make the recoverable session use these 3 sessions in sequence by
# passing a factory that pops from the sessions_to_use list.
recoverable_sess = monitored_session._RecoverableSession(session_creator)
self.assertEqual(
2, len(session_creator.sessions_to_use)) # One session popped.
# Using first session.
self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
self.assertEqual(
2, len(session_creator.sessions_to_use)) # Still 2 sessions available
# This will fail and recover by picking up the second session.
self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
self.assertEqual(
1, len(session_creator.sessions_to_use)) # Still 1 session available
# This will fail and recover by picking up the last session.
self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
self.assertEqual(
0, len(session_creator.sessions_to_use)) # All sessions used.
self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
# This will fail and throw a real error as the pop() will fail.
with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
recoverable_sess.run(v, feed_dict={c: -12})
@test_util.run_deprecated_v1
def test_recovery_from_coordinator_exception(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# Even though the coordinator was asked to stop, the underlying session
# is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# The coordinator was asked to stop due to an unrecoverable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run(v, feed_dict={c: 51}))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run(v, feed_dict={c: 42}))
# Even though the coordinator stopped and training failed, the
# underlying session is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[StopCoordinatorWithException(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session
# is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
hook = StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.'))
session = monitored_session.MonitoredSession(session_creator, [hook])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to an unrecoverable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_when_run_hooks(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = monitored_session.MonitoredSession(
session_creator,
[FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator stopped and training failed, the
# underlying session is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
def create_raw_session_with_failing_coordinator(self, session_creator, hook):
"""Return MonitoredSession that triggers coordinator failures."""
session = monitored_session.MonitoredSession(session_creator, [hook])
# We would like to test a situation where, during fetches through the
# raw session, the coordinator fails with an exception. To do that, we
# use a (raw_session + StopCoordinatorWithException) combination; the
# raw session is stored in
# `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
# at this point:
session._tf_sess = lambda: session._sess._sess._sess
# `run()` on such a session is equivalent to `run()` on the raw session
# with separate coordinator threads independently stopping with an
# exception.
return session
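# Note (sketch): the lambda above overrides the session's internal
# _tf_sess() accessor, so step_context.session inside run_step_fn() reaches
# the raw underlying session through the wrapper chain while the hook's
# side thread still fails the coordinator independently.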
@test_util.run_deprecated_v1
def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.session.run(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator was asked to stop, the underlying session
# is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
@test_util.run_deprecated_v1
def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
StopCoordinatorWithException(
calls_before_stopping=2,
exception_to_raise=errors_impl.UnknownError(
None, None, 'Some fatal exception inside the coordinator.')))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# The coordinator will not abort during this call, since it's the call
# number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# The coordinator will abort during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# The coordinator was asked to stop due to an unrecoverable error. Training
# should stop and the session should not be recreated.
self.assertTrue(session.should_stop())
self.assertEqual(1, session_creator.number_of_sessions_created)
with self.assertRaises(errors_impl.UnknownError):
session.close()
@test_util.run_deprecated_v1
def test_recovery_from_session_getting_stuck_with_raw_session(self):
with self.cached_session() as test_session:
session_creator = CountingSessionCreator(test_session)
session = self.create_raw_session_with_failing_coordinator(
session_creator,
FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
self.assertEqual(1, session_creator.number_of_sessions_created)
self.assertFalse(session.should_stop())
c = constant_op.constant(0)
v = array_ops.identity(c)
def feed_step_fn(value):
def step_fn(step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
return step_fn
# Training will not fail, since it's the call number 0.
self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
self.assertFalse(session.should_stop())
# Training will fail during the next call, since it's the call
# number 1.
self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
# Even though the coordinator stopped and training failed, the
# underlying session is recreated and training continues.
self.assertFalse(session.should_stop())
self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
def __init__(self, sess):
monitored_session._WrappedSession.__init__(self, sess)
self.args_called = {}
def run(self, fetches, **kwargs):
self.args_called = dict(kwargs)
# Call run only with fetches since we directly pass other arguments.
return monitored_session._WrappedSession.run(self, fetches)
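# A minimal sketch (illustrative only): FakeSession records the keyword
# arguments it receives and then runs the fetches alone, which is what lets
# testRunPassesAllArguments below assert that _HookedSession forwarded
# feed_dict, options, and run_metadata untouched.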
class HookedSessionTest(test.TestCase):
"""Tests of _HookedSession."""
def testRunPassesAllArguments(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_run = FakeSession(sess)
mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor,
feed_dict='a_feed',
options='an_option',
run_metadata='a_metadata')
self.assertEqual(output, [0])
self.assertEqual(mock_run.args_called, {
'feed_dict': 'a_feed',
'options': 'an_option',
'run_metadata': 'a_metadata'
})
def testCallsHooksBeginEnd(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(a_tensor)
for hook in [mock_hook, mock_hook2]:
self.assertEqual(
hook.last_run_values,
session_run_hook.SessionRunValues(
results=None,
options=config_pb2.RunOptions(),
run_metadata=config_pb2.RunMetadata()))
self.assertEqual(hook.last_run_context.original_args,
session_run_hook.SessionRunArgs(a_tensor))
self.assertEqual(hook.last_run_context.session, sess)
self.assertEqual(hook.call_counter['begin'], 0)
self.assertEqual(hook.call_counter['after_create_session'], 0)
self.assertEqual(hook.call_counter['before_run'], 1)
self.assertEqual(hook.call_counter['after_run'], 1)
def testShouldStop(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
constant_op.constant([0], name='a_tensor')
self.evaluate(variables.global_variables_initializer())
mon_sess.run(fetches='a_tensor')
self.assertFalse(mon_sess.should_stop())
mock_hook.should_stop = True
mon_sess.run(fetches='a_tensor')
self.assertTrue(mon_sess.should_stop())
def testFetchesHookRequests(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
another_tensor = constant_op.constant([5], name='another_tensor')
third_tensor = constant_op.constant([10], name='third_tensor')
mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
self.evaluate(variables.global_variables_initializer())
output = mon_sess.run(fetches=a_tensor)
self.assertEqual(output, [0])
self.assertEqual(mock_hook.last_run_values.results, [5])
self.assertEqual(mock_hook2.last_run_values.results, [10])
def testOnlyHooksHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
def testBothHooksAndUserHaveFeeds(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
c_tensor = constant_op.constant([0], name='c_tensor')
add_tensor = a_tensor + b_tensor + c_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
feed_dict = {c_tensor: [20]}
self.assertEqual(
mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
# User feed_dict should not be changed
self.assertEqual(len(feed_dict), 1)
def testHooksFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor)
def testHooksAndUserFeedConflicts(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
mock_hook = FakeHook()
mock_hook2 = FakeHook()
mon_sess = monitored_session._HookedSession(
sess=sess, hooks=[mock_hook, mock_hook2])
a_tensor = constant_op.constant([0], name='a_tensor')
b_tensor = constant_op.constant([0], name='b_tensor')
add_tensor = a_tensor + b_tensor
mock_hook.request = session_run_hook.SessionRunArgs(
None, feed_dict={a_tensor: [5]})
mock_hook2.request = session_run_hook.SessionRunArgs(
None, feed_dict={b_tensor: [10]})
self.evaluate(variables.global_variables_initializer())
with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
"""Hook that raises an Exception at step N."""
def __init__(self, n, ex):
self.n = n
self.ex = ex
self.raised = False
def before_run(self, run_context):
# Raise the first time we reach step N.
self.n -= 1
if 0 == self.n and not self.raised:
self.raised = True
raise self.ex
return None
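# A minimal sketch (illustrative only; the None run_context stands in for a
# real SessionRunContext): the hook raises on the N-th before_run() and
# only once, so the retry tests below can tell "recovered and kept going"
# apart from "kept failing".
def _raise_once_sketch():
  hook = RaiseOnceAtCountN(4, errors_impl.AbortedError(None, None, 'Abort'))
  for _ in range(3):
    assert hook.before_run(None) is None  # steps 1-3 pass through
  try:
    hook.before_run(None)  # step 4 raises, exactly once
  except errors_impl.AbortedError:
    pass
  assert hook.before_run(None) is None  # later steps pass through again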
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
"""A hook that observes & optionally modifies RunOptions and RunMetadata."""
def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
debug_tensor_watch, report_tensor_allocations_upon_oom):
self._trace_level = trace_level
self._timeout_in_ms = timeout_in_ms
self._output_partition_graphs = output_partition_graphs
self._debug_tensor_watch = debug_tensor_watch
self._report_tensor_allocations_upon_oom = (
report_tensor_allocations_upon_oom)
self.run_options_list = []
self.run_metadata_list = []
def before_run(self, run_context):
options = config_pb2.RunOptions(
trace_level=self._trace_level,
timeout_in_ms=self._timeout_in_ms,
output_partition_graphs=self._output_partition_graphs,
report_tensor_allocations_upon_oom=self
._report_tensor_allocations_upon_oom)
options.debug_options.debug_tensor_watch_opts.extend(
[self._debug_tensor_watch])
return session_run_hook.SessionRunArgs(None, None, options=options)
def after_run(self, run_context, run_values):
self.run_options_list.append(run_values.options)
self.run_metadata_list.append(run_values.run_metadata)
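# A minimal sketch (illustrative only; all option values are arbitrary):
# constructing the hook with a debug tensor watch shows the shape of the
# RunOptions it injects into every step via before_run().
def _run_options_hook_sketch():
  watch = debug_pb2.DebugTensorWatch(
      node_name='a_tensor', output_slot=0, debug_ops=['DebugIdentity'])
  return RunOptionsMetadataHook(
      trace_level=config_pb2.RunOptions.FULL_TRACE,
      timeout_in_ms=60000,
      output_partition_graphs=True,
      debug_tensor_watch=watch,
      report_tensor_allocations_upon_oom=True)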
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
def test_last_step(self):
logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Run till step 3 and save.
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(3, session.run(do_step))
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Run till step 5 and save.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(3, session.run(gstep))
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertEqual(5, session.run(do_step))
self.assertTrue(session.should_stop())
def test_num_steps(self):
logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
# Do 3 steps and save.
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
with monitored_session.MonitoredSession(hooks=hooks) as session:
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
save_path = saver_lib._get_saver_or_default().save(
session._coordinated_creator.tf_sess,
os.path.join(logdir, 'step-3'))
# Restore and do 4 steps.
def load_ckpt(scaffold, sess):
scaffold.saver.restore(sess, save_path)
session_creator = monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
with monitored_session.MonitoredSession(
hooks=hooks, session_creator=session_creator) as session:
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertFalse(session.should_stop())
session.run(do_step)
self.assertTrue(session.should_stop())
# This set of tests verifies the monitored session behavior when exceptions
# are raised next to the innermost session run() call.
@test_util.run_deprecated_v1
def test_recovery(self):
logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
# Use a hook to save the model after every step. It also saves it at
# the end.
hooks = [
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir)) as session:
self.assertEqual(2, session.run(gstep))
# A restart will find the checkpoint and recover automatically.
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold,
checkpoint_filename_with_path=checkpoint_management.
latest_checkpoint(logdir))) as session:
self.assertEqual(2, session.run(gstep))
def test_retry_initialization_on_aborted_error(self):
# Tests that we silently retry on abort during initialization.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
self.init_raised_aborted_error = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
if not self.init_raised_aborted_error:
self.init_raised_aborted_error = True
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold=monitored_session.Scaffold(
init_fn=_init_fn))) as session:
self.assertFalse(session.should_stop())
self.assertEqual(0, session.run(gstep))
self.assertTrue(self.init_raised_aborted_error)
def _retry_test(self, ex):
# Tests that we silently retry on error. Note that this does not test
# recovery as we do not use a CheckpointSaver in this test.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, ex)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises the given error. The
        # MonitoredSession automatically retries and restarts from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
self.assertEqual(1, session.run(do_step))
self.assertFalse(session.should_stop())
self.assertTrue(hook.raised)
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
def test_recover_and_retry_on_aborted_error(self):
# Tests that we silently retry and recover on abort. This test uses
# a CheckpointSaver to have something to recover from.
logdir = _test_dir(self.get_temp_dir(),
'test_recover_and_retry_on_aborted_error')
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
scaffold = monitored_session.Scaffold()
abort_hook = RaiseOnceAtCountN(
4, errors_impl.AbortedError(None, None, 'Abort'))
# Save after each step.
ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=1, scaffold=scaffold)
hooks = [abort_hook, ckpt_hook]
with monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
scaffold, checkpoint_dir=logdir),
hooks=hooks) as session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Here at step 3, the hook triggers and raises AbortedError. The
# MonitoredSession automatically restores and retries.
self.assertEqual(3, session.run(do_step))
self.assertTrue(abort_hook.raised)
self.assertFalse(session.should_stop())
self.assertEqual(4, session.run(do_step))
self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode and raise the exception,
        # so the next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when StopIteration is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration. The
        # session should go into should_stop() mode and raise the exception,
        # so the next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.MonitoredSession()
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when the with body exits normally.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_raises_regular_exceptions_in_with_body(self):
# Tests that regular exceptions in "with body" are seen outside.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.MonitoredSession()
# We should see that exception.
with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Will be visible outside the "with body".
raise RuntimeError('regular exception')
# Should have closed.
self.assertTrue(session.should_stop())
self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_graph_finalized_during_run_unfinalized_after_exit(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertFalse(g.finalized)
def test_keep_finalized_graph_as_finalized(self):
with ops.Graph().as_default() as g:
a_var = variables.VariableV1(0)
monitored_session.Scaffold().finalize()
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
self.assertTrue(g.finalized)
self.assertTrue(g.finalized)
def test_merge_run_options_from_hooks(self):
"""Test for rewriting RunOptions and observing RunMetadata with hooks."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
watch_a = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
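      # RunOptionsMetadataHook arguments, as exercised by the assertions
      # below: (trace_level, timeout_in_ms, output_partition_graphs,
      # debug_tensor_watch, report_tensor_allocations_upon_oom).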
hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
watch_b = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
with monitored_session.MonitoredSession(
hooks=[hook_a, hook_b]) as session:
self.assertEqual(42, session.run(my_const))
# trace_level=3 should have overridden trace_level=2;
# timeout_in_ms=60000 should have overridden 30000;
# output_partition_graphs=True should have overridden False.
# The two debug tensor watches should have been merged.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[watch_a, watch_b]),
report_tensor_allocations_upon_oom=True),
], hook_b.run_options_list)
self.assertEqual(1, len(hook_b.run_metadata_list))
self.assertTrue(
isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
def test_merge_caller_and_hook_run_options(self):
"""Test that RunOptions from caller and hooks can be merged properly."""
with ops.Graph().as_default():
my_const = constant_op.constant(42, name='my_const')
_ = constant_op.constant(24, name='my_const_2')
hook_watch = debug_pb2.DebugTensorWatch(
node_name='my_const_2',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
with monitored_session.MonitoredSession(hooks=[hook]) as session:
caller_watch = debug_pb2.DebugTensorWatch(
node_name='my_const',
output_slot=0,
debug_ops=['DebugIdentity'],
debug_urls=[])
caller_options = config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=30000,
output_partition_graphs=True,
report_tensor_allocations_upon_oom=True)
caller_options.debug_options.debug_tensor_watch_opts.extend(
[caller_watch])
self.assertEqual(42, session.run(my_const, options=caller_options))
# trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override 30000 from the
        # caller.
        # output_partition_graphs=True from the caller should override False
# from the hook.
# The two debug watches from the caller and the hook should be merged,
# in that order.
self.assertEqual([
config_pb2.RunOptions(
trace_level=3,
timeout_in_ms=60000,
output_partition_graphs=True,
debug_options=debug_pb2.DebugOptions(
debug_tensor_watch_opts=[caller_watch, hook_watch]),
report_tensor_allocations_upon_oom=True),
], hook.run_options_list)
self.assertEqual(1, len(hook.run_metadata_list))
self.assertTrue(
isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
@test_util.run_deprecated_v1
def test_with_statement_and_close(self):
# Test case for https://github.com/tensorflow/tensorflow/issues/12224
# where close() inside the with should have a better error message.
with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):
with monitored_session.MonitoredSession() as session:
session.close()
def test_step_fn_example(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
with ops.Graph().as_default():
was_stop_iteration_raised = False
def step_fn(step_context):
step_context.request_stop()
session = monitored_session.MonitoredSession()
try:
self.assertEqual(None, session.run_step_fn(step_fn))
except StopIteration:
was_stop_iteration_raised = True
self.assertTrue(was_stop_iteration_raised)
self.assertFalse(session.should_stop())
def test_step_request_stop_in_a_loop(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
while not session.should_stop():
_ = session.run_step_fn(step_fn)
self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
with ops.Graph().as_default():
def step_fn(step_context):
del step_context
return 'a type'
with monitored_session.MonitoredSession() as session:
self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
with ops.Graph().as_default():
def step_fn(step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegexp(
ValueError,
'`step_fn` may either have one `step_context` argument'):
self.assertEqual(None, session.run_step_fn(step_fn))
def test_step_fn_belongs_to_a_class(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
class Model(object):
def step_fn(self, step_context):
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
model = Model()
self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
with ops.Graph().as_default():
class Model(object):
def step_fn(self, step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegexp(
ValueError,
'`step_fn` may either have one `step_context` argument'):
model = Model()
self.assertEqual(None, session.run_step_fn(model.step_fn))
def test_step_fn_with_hooks(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
      # This test highlights the interaction of hooks with
      # `MonitoredSession.run_step_fn`. The order of execution of operations
# below is:
# 0. stage_0
# 1. stage_1_0 or stage_1_1 in an undefined order
# 2. stage_2
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
# The order of `stage_1_0` and `stage_1_1` is undefined by
# `MonitoredSession`, but we should be able to assert when both of them
# are complete. To obtain a consistent result of adding two different
# constants to `var`, we rely on a control dependency and
# `ResourceVariable`. Otherwise, it is possible that one of the
      # additions overwrites the result of the other addition.
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
with ops.Graph().as_default():
var = resource_variable_ops.ResourceVariable(0.0)
stage_0 = state_ops.assign_add(var, 0.3)
stage_1_0 = state_ops.assign_add(var, 0.7)
with ops.control_dependencies([stage_1_0]):
stage_1_1 = state_ops.assign_add(var, 0.5)
stage_2 = state_ops.assign_add(var, 1.1)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(fetches=stage_1_0)
def after_run(self, run_context, run_values):
self._testing.assertNear(0.3 + 0.5 + 0.7,
run_context.session.run(var), 0.1)
self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
run_context.session.run(stage_2), 0.1)
def step_fn(step_context):
self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
return step_context.run_with_hooks(fetches=stage_1_1)
with monitored_session.SingularMonitoredSession(
hooks=[Hook(self)]) as session:
self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
with ops.Graph().as_default():
trace_the_hook = {'before_run': False, 'after_run': False}
class Hook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
trace_the_hook['before_run'] = True
def after_run(self, run_context, run_values):
trace_the_hook['after_run'] = True
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
# `step_context.request_stop()` in a step_fn interrupts the flow of
# running the hooks.
self.assertFalse(trace_the_hook['before_run'])
self.assertFalse(trace_the_hook['after_run'])
def test_recovers_from_an_exception_in_step_fn(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return value
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session)) as session:
session.run(variables.global_variables_initializer())
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.assertTrue(trace_the_exception['run_already'])
# Make sure the rest of the body of the step_fn is re-executed upon
# AbortedError:
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
trace_the_exception = {'run_already': False}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.SingularMonitoredSession() as session:
with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
self.fail()
self.assertTrue(trace_the_exception['run_already'])
def test_step_fn_exception_from_before_run(self):
trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
vv = constant_op.constant(3.2)
graph_state = variables.VariableV1(0.0)
graph_side_effect = state_ops.assign_add(graph_state, 0.31)
class Hook(session_run_hook.SessionRunHook):
def __init__(self, testing):
self._testing = testing
def before_run(self, run_context):
if not trace_the_exception['run_already']:
trace_the_exception['run_already'] = True
raise errors_impl.AbortedError(None, None, 'Abort')
return session_run_hook.SessionRunArgs(fetches=vv)
def after_run(self, run_context, run_values):
self._testing.assertNear(3.2, run_values.results, 0.1)
def step_fn(step_context):
trace_the_exception['side_effect_counter'] += 1
step_context.session.run(graph_side_effect)
return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})
with self.cached_session() as test_session:
with monitored_session.MonitoredSession(
CountingSessionCreator(test_session),
hooks=[Hook(self)]) as session:
test_session.run(variables.global_variables_initializer())
self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
self.assertEqual(2, trace_the_exception['side_effect_counter'])
self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
"""Tests SingularMonitoredSession."""
def test_handles_initialization(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, the following statement raises an error.
self.assertEqual(0, session.run(a_var))
def test_do_not_handle_aborted_error(self):
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
class _RaiseAbortedHook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
raise errors_impl.AbortedError(None, None, 'Abort')
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
with self.assertRaises(errors_impl.AbortedError):
self.assertEqual(0, session.run(gstep))
with self.assertRaises(errors_impl.AbortedError):
with monitored_session.SingularMonitoredSession(
hooks=[_RaiseAbortedHook()]) as session:
self.assertEqual(0, session.run(gstep))
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.SingularMonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode and raise the exception,
        # so the next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # pass through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
session = monitored_session.SingularMonitoredSession()
run_performed_without_error = False
with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
with session:
self.assertEqual(0, session.run(gstep))
# Report an exception through the coordinator.
try:
raise RuntimeError('a thread wants to stop')
except RuntimeError as e:
session._coordinated_creator.coord.request_stop(e)
# Call run() which should perform normally.
self.assertEqual(0, session.run(gstep))
run_performed_without_error = True
self.assertTrue(run_performed_without_error)
def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session closes cleanly when the with body exits normally.
with ops.Graph().as_default():
gstep = variables_lib.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
session = monitored_session.SingularMonitoredSession()
with session:
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# Should have closed.
self.assertTrue(session.should_stop())
self.assertEqual(None, session.raw_session())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.SingularMonitoredSession() as session:
self.assertEqual(g, session.graph)
def test_raw_session(self):
with ops.Graph().as_default():
with monitored_session.SingularMonitoredSession() as session:
self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/monitored_session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for training models.
See the [Training](https://tensorflow.org/api_guides/python/train) guide.
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order,unused-import
from tensorflow.python.ops.sdca_ops import sdca_optimizer
from tensorflow.python.ops.sdca_ops import sdca_fprint
from tensorflow.python.ops.sdca_ops import sdca_shrink_l1
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
from tensorflow.python.training.proximal_adagrad import ProximalAdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.experimental.loss_scale_optimizer import MixedPrecisionLossScaleOptimizer
from tensorflow.python.training.experimental.mixed_precision import enable_mixed_precision_graph_rewrite
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import * # pylint: disable=redefined-builtin
# pylint: enable=wildcard-import
from tensorflow.python.training.basic_session_run_hooks import get_or_create_steps_per_run_variable
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
from tensorflow.python.training.basic_session_run_hooks import LoggingTensorHook
from tensorflow.python.training.basic_session_run_hooks import StopAtStepHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverHook
from tensorflow.python.training.basic_session_run_hooks import CheckpointSaverListener
from tensorflow.python.training.basic_session_run_hooks import StepCounterHook
from tensorflow.python.training.basic_session_run_hooks import NanLossDuringTrainingError
from tensorflow.python.training.basic_session_run_hooks import NanTensorHook
from tensorflow.python.training.basic_session_run_hooks import SummarySaverHook
from tensorflow.python.training.basic_session_run_hooks import GlobalStepWaiterHook
from tensorflow.python.training.basic_session_run_hooks import FinalOpsHook
from tensorflow.python.training.basic_session_run_hooks import FeedFnHook
from tensorflow.python.training.basic_session_run_hooks import ProfilerHook
from tensorflow.python.training.basic_loops import basic_train_loop
from tensorflow.python.training.tracking.python_state import PythonState
from tensorflow.python.training.tracking.util import Checkpoint
from tensorflow.python.training.checkpoint_utils import init_from_checkpoint
from tensorflow.python.training.checkpoint_utils import list_variables
from tensorflow.python.training.checkpoint_utils import load_checkpoint
from tensorflow.python.training.checkpoint_utils import load_variable
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.monitored_session import Scaffold
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
from tensorflow.python.training.monitored_session import SessionCreator
from tensorflow.python.training.monitored_session import ChiefSessionCreator
from tensorflow.python.training.monitored_session import WorkerSessionCreator
from tensorflow.python.training.monitored_session import MonitoredSession
from tensorflow.python.training.monitored_session import SingularMonitoredSession
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.checkpoint_management import checkpoint_exists
from tensorflow.python.training.checkpoint_management import generate_checkpoint_state_proto
from tensorflow.python.training.checkpoint_management import get_checkpoint_mtimes
from tensorflow.python.training.checkpoint_management import get_checkpoint_state
from tensorflow.python.training.checkpoint_management import latest_checkpoint
from tensorflow.python.training.checkpoint_management import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_run_hook import SessionRunHook
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.session_run_hook import SessionRunContext
from tensorflow.python.training.session_run_hook import SessionRunValues
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.training.training_util import get_global_step
from tensorflow.python.training.training_util import assert_global_step
from tensorflow.python.training.training_util import create_global_step
from tensorflow.python.training.training_util import get_or_create_global_step
from tensorflow.python.training.warm_starting_util import VocabInfo
from tensorflow.python.training.warm_starting_util import warm_start
from tensorflow.python.pywrap_tensorflow import do_quantize_training_on_graphdef
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=wildcard-import
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import *
# pylint: enable=wildcard-import
# Distributed computing support.
from tensorflow.core.protobuf.cluster_pb2 import ClusterDef
from tensorflow.core.protobuf.cluster_pb2 import JobDef
from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.training.server_lib import Server
# pylint: disable=undefined-variable
tf_export("train.BytesList")(BytesList)
tf_export("train.ClusterDef")(ClusterDef)
tf_export("train.Example")(Example)
tf_export("train.Feature")(Feature)
tf_export("train.Features")(Features)
tf_export("train.FeatureList")(FeatureList)
tf_export("train.FeatureLists")(FeatureLists)
tf_export("train.FloatList")(FloatList)
tf_export("train.Int64List")(Int64List)
tf_export("train.JobDef")(JobDef)
tf_export(v1=["train.SaverDef"])(SaverDef)
tf_export("train.SequenceExample")(SequenceExample)
tf_export("train.ServerDef")(ServerDef)
# pylint: enable=undefined-variable
|
tensorflow-master
|
tensorflow/python/training/training.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for learning rate decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import learning_rate_decay
class LRDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testContinuous(self):
self.evaluate(variables.global_variables_initializer())
step = 5
decayed_lr = learning_rate_decay.exponential_decay(0.05, step, 10, 0.96)
expected = .05 * 0.96**(5.0 / 10.0)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
if context.executing_eagerly():
step = resource_variable_ops.ResourceVariable(0)
self.evaluate(variables.global_variables_initializer())
decayed_lr = learning_rate_decay.exponential_decay(
.1, step, 3, 0.96, staircase=True)
# No change to learning rate due to staircase
expected = .1
self.evaluate(step.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
expected = .1
self.evaluate(step.assign(2))
      self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
# Decayed learning rate
expected = .1 * 0.96 ** (100 // 3)
self.evaluate(step.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_deprecated_v1
def testVariables(self):
step = variables.VariableV1(1)
assign_1 = step.assign(1)
assign_2 = step.assign(2)
assign_100 = step.assign(100)
decayed_lr = learning_rate_decay.exponential_decay(
.1, step, 3, 0.96, staircase=True)
self.evaluate(variables.global_variables_initializer())
# No change to learning rate
self.evaluate(assign_1.op)
self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
self.evaluate(assign_2.op)
self.assertAllClose(self.evaluate(decayed_lr), .1, 1e-6)
# Decayed learning rate
self.evaluate(assign_100.op)
expected = .1 * 0.96**(100 // 3)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testPiecewiseConstant(self):
x = resource_variable_ops.ResourceVariable(-999)
decayed_lr = learning_rate_decay.piecewise_constant(
x, [100, 110, 120], [1.0, 0.1, 0.01, 0.001])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(100))
self.assertAllClose(self.evaluate(decayed_lr), 1.0, 1e-6)
self.evaluate(x.assign(105))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(110))
self.assertAllClose(self.evaluate(decayed_lr), 0.1, 1e-6)
self.evaluate(x.assign(120))
self.assertAllClose(self.evaluate(decayed_lr), 0.01, 1e-6)
self.evaluate(x.assign(999))
self.assertAllClose(self.evaluate(decayed_lr), 0.001, 1e-6)
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testPiecewiseConstantEdgeCases(self):
x_int = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int32)
boundaries, values = [-1.0, 1.0], [1, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x_int, boundaries, values)
if context.executing_eagerly():
decayed_lr()
x = resource_variable_ops.ResourceVariable(0.0)
boundaries, values = [-1.0, 1.0], [1.0, 2, 3]
with self.assertRaises(ValueError):
decayed_lr = learning_rate_decay.piecewise_constant(
x, boundaries, values)
if context.executing_eagerly():
decayed_lr()
# Test that ref types are valid.
if not context.executing_eagerly():
x = variables.VariableV1(0.0)
x_ref = x.op.outputs[0] # float32_ref tensor should be accepted
boundaries, values = [1.0, 2.0], [1, 2, 3]
learning_rate_decay.piecewise_constant(x_ref, boundaries, values)
# Test casting boundaries from int32 to int64.
x_int64 = resource_variable_ops.ResourceVariable(
0, dtype=variables.dtypes.int64)
boundaries, values = [1, 2, 3], [0.4, 0.5, 0.6, 0.7]
decayed_lr = learning_rate_decay.piecewise_constant(
x_int64, boundaries, values)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(1))
self.assertAllClose(self.evaluate(decayed_lr), 0.4, 1e-6)
self.evaluate(x_int64.assign(2))
self.assertAllClose(self.evaluate(decayed_lr), 0.5, 1e-6)
self.evaluate(x_int64.assign(3))
self.assertAllClose(self.evaluate(decayed_lr), 0.6, 1e-6)
self.evaluate(x_int64.assign(4))
self.assertAllClose(self.evaluate(decayed_lr), 0.7, 1e-6)
class LinearDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = lr * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = (lr + end_lr) * 0.5
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(lr, step, 10, end_lr)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, cycle=True)
expected = (lr - end_lr) * 0.25 + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class SqrtDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testHalfWay(self):
step = 5
lr = 0.05
end_lr = 0.0
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = lr * 0.5**power
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testEnd(self):
step = 10
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testHalfWayWithEnd(self):
step = 5
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = (lr - end_lr) * 0.5**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEnd(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power)
expected = end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testBeyondEndWithCycle(self):
step = 15
lr = 0.05
end_lr = 0.001
power = 0.5
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, 10, end_lr, power=power, cycle=True)
expected = (lr - end_lr) * 0.25**power + end_lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class PolynomialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testBeginWithCycle(self):
lr = 0.001
decay_steps = 10
step = 0
decayed_lr = learning_rate_decay.polynomial_decay(
lr, step, decay_steps, cycle=True)
expected = lr
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class ExponentialDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.natural_exp_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr * math.exp(-decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class InverseDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(initial_lr, step, k,
decay_rate)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + i / k * decay_rate)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
@test_util.run_in_graph_and_eager_modes
def testStaircase(self):
initial_lr = 0.1
k = 10
decay_rate = 0.96
step = resource_variable_ops.ResourceVariable(0)
decayed_lr = learning_rate_decay.inverse_time_decay(
initial_lr, step, k, decay_rate, staircase=True)
self.evaluate(variables.global_variables_initializer())
for i in range(k + 1):
expected = initial_lr / (1 + decay_rate * (i // k))
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
self.evaluate(step.assign_add(1))
class CosineDecayTest(test_util.TensorFlowTestCase):
def np_cosine_decay(self, step, decay_steps, alpha=0.0):
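    # Pure-Python reference implementation of cosine decay:
    #   (1 - alpha) * 0.5 * (1 + cos(pi * step / decay_steps)) + alpha,
    # with step clipped at decay_steps.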
step = min(step, decay_steps)
completed_fraction = step / decay_steps
decay = 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps)
expected = self.np_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay(initial_lr, step,
num_training_steps, alpha)
expected = self.np_cosine_decay(step, num_training_steps, alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class CosineDecayRestartsTest(test_util.TensorFlowTestCase):
def np_cosine_decay_restarts(self, step, decay_steps, t_mul=2.0, m_mul=1.0,
alpha=0.0):
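    # Pure-Python reference implementation of cosine decay with warm restarts:
    # unroll the restart schedule (each period is t_mul times longer and its
    # amplitude is scaled by m_mul), then apply plain cosine decay within the
    # current period.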
fac = 1.0
while step >= decay_steps:
step -= decay_steps
decay_steps *= t_mul
fac *= m_mul
completed_fraction = step / decay_steps
decay = fac * 0.5 * (1.0 + math.cos(math.pi * completed_fraction))
return (1.0 - alpha) * decay + alpha
@test_util.run_in_graph_and_eager_modes
def testDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps)
expected = self.np_cosine_decay_restarts(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testAlpha(self):
num_training_steps = 1000
initial_lr = 1.0
alpha = 0.1
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, alpha=alpha)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, alpha=alpha)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testMMul(self):
num_training_steps = 1000
initial_lr = 1.0
m_mul = 0.9
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, m_mul=m_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, m_mul=m_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testTMul(self):
num_training_steps = 1000
initial_lr = 1.0
t_mul = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.cosine_decay_restarts(
initial_lr, step, num_training_steps, t_mul=t_mul)
expected = self.np_cosine_decay_restarts(
step, num_training_steps, t_mul=t_mul)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class LinearCosineDecayTest(test_util.TensorFlowTestCase):
def np_linear_cosine_decay(self,
step,
decay_steps,
alpha=0.0,
beta=0.001,
num_periods=0.5):
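    # Pure-Python reference implementation of linear cosine decay: a linear
    # ramp from 1 to 0 over decay_steps, modulated by a cosine completing
    # num_periods periods; alpha shifts the linear term inside the product
    # and beta shifts the final result.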
step = min(step, decay_steps)
linear_decayed = float(decay_steps - step) / decay_steps
fraction = 2.0 * num_periods * step / float(decay_steps)
cosine_decayed = 0.5 * (1.0 + math.cos(math.pi * fraction))
return (alpha + linear_decayed) * cosine_decayed + beta
@test_util.run_in_graph_and_eager_modes
def testDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr, step, num_training_steps)
expected = self.np_linear_cosine_decay(step, num_training_steps)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultDecay(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
decayed_lr = learning_rate_decay.linear_cosine_decay(
initial_lr,
step,
num_training_steps,
alpha=0.1,
beta=1e-4,
num_periods=5)
expected = self.np_linear_cosine_decay(
step, num_training_steps, alpha=0.1, beta=1e-4, num_periods=5)
self.assertAllClose(self.evaluate(decayed_lr), expected, 1e-6)
class NoisyLinearCosineDecayTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr, step, num_training_steps)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
@test_util.run_in_graph_and_eager_modes
def testNonDefaultNoisyLinearCosine(self):
num_training_steps = 1000
initial_lr = 1.0
for step in range(0, 1500, 250):
# No numerical check because of noise
decayed_lr = learning_rate_decay.noisy_linear_cosine_decay(
initial_lr,
step,
num_training_steps,
initial_variance=0.5,
variance_decay=0.1,
alpha=0.1,
beta=1e-4,
num_periods=5)
# Cannot be deterministically tested
self.evaluate(decayed_lr)
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/training/learning_rate_decay_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.AdadeltaOptimizer"])
class AdadeltaOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
See [M. D. Zeiler](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
"""
def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8,
use_locking=False, name="Adadelta"):
"""Construct a new Adadelta optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
      epsilon: A `Tensor` or a floating point value. A constant epsilon used
        to better condition the grad update.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can
each be a callable that takes no arguments and returns the actual value to
use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
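    A minimal usage sketch (assumes a `loss` tensor is already defined; the
    hyperparameter values shown are illustrative):
    ```python
    opt = tf.compat.v1.train.AdadeltaOptimizer(
        learning_rate=1.0,  # 1.0 matches the exact form in the original paper
        rho=0.95,
        epsilon=1e-8)
    train_op = opt.minimize(loss)
    # With eager execution enabled, hyperparameters may instead be
    # zero-argument callables, e.g. learning_rate=lambda: 1.0.
    ```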
"""
super(AdadeltaOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._rho = rho
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._rho_t = None
self._epsilon_t = None
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "accum", self._name)
self._zeros_slot(v, "accum_update", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
rho = self._call_if_callable(self._rho)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="lr")
self._rho_t = ops.convert_to_tensor(rho, name="rho")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.resource_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._rho_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.sparse_apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return training_ops.resource_sparse_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype),
math_ops.cast(self._rho_t, grad.dtype),
math_ops.cast(self._epsilon_t, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
|
tensorflow-master
|
tensorflow/python/training/adadelta.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions to use mixed precision with the graph rewrite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import config
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import optimizer
from tensorflow.python.training.experimental import loss_scale_optimizer as loss_scale_optimizer_v1
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def _wrap_optimizer(opt, loss_scale):
"""Wraps an optimizer with a LossScaleOptimizer."""
if isinstance(opt, loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer):
raise ValueError('"opt" must not already be an instance of a '
'MixedPrecisionLossScaleOptimizer. '
'`enable_mixed_precision_graph_rewrite` will '
'automatically wrap the optimizer with a '
'MixedPrecisionLossScaleOptimizer.')
# To avoid a circular dependency, we cannot depend on tf.keras. Because
# LossScaleOptimizer is in Keras, we cannot use isinstance, so instead check
# the class name.
if opt.__class__.__name__ == 'LossScaleOptimizer':
raise ValueError('"opt" must not already be an instance of a '
'LossScaleOptimizer. '
'`enable_mixed_precision_graph_rewrite` will '
'automatically wrap the optimizer with a '
'LossScaleOptimizer.')
if isinstance(opt, optimizer.Optimizer):
# For convenience, we allow the V2 version of this function to wrap the V1
# optimizer, even though we do not document this.
return loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
loss_scale)
  # Because we cannot depend on tf.keras, we check whether `opt` is an instance
  # of the Keras OptimizerV2 class by inspecting the names of its base classes.
  base_classes = tf_inspect.getmro(opt.__class__)
  base_class_names = [cls.__name__ for cls in base_classes]
  is_optimizer_v2 = 'OptimizerV2' in base_class_names
  if is_optimizer_v2:
# Because we cannot depend on tf.keras, we cannot unconditionally do this
# import. But since `opt` is a Keras OptimizerV2, we know keras is
# importable, so it is safe to do this import. (Technically, it's possible
# to have a dependency on OptimizerV2 and not LossScaleOptimizer, but this
# is not done in practice).
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer as loss_scale_optimizer_v2 # pylint: disable=g-import-not-at-top
return loss_scale_optimizer_v2.LossScaleOptimizer(opt, loss_scale)
raise ValueError('"opt" must be an instance of a tf.train.Optimizer or a '
'tf.keras.optimizers.Optimizer, but got: %s' % opt)
@tf_export(v1=['train.experimental.enable_mixed_precision_graph_rewrite'])
def enable_mixed_precision_graph_rewrite(opt, loss_scale='dynamic'):
"""Enable mixed precision via a graph rewrite.
  Mixed precision is the use of both float16 and float32 when training a model
  in order to make it run faster. This function uses mixed precision to speed
  up the execution time of your model when run on a GPU. It does this by
  changing the dtype of certain operations in the graph from float32 to
  float16.
This function additionally wraps an Optimizer with a LossScaleOptimizer, which
is required to prevent underflow in the float16 tensors during the backwards
pass. An optimizer must be passed to this function, which will then be wrapped
to use loss scaling.
When this function is used, gradients should only be computed and applied with
the returned optimizer, either by calling `opt.minimize()` or
`opt.compute_gradients()` followed by `opt.apply_gradients()`. Gradients
should not be computed with `tf.gradients` or `tf.GradientTape`. This is
because the returned optimizer will apply loss scaling, and
`tf.gradients`/`tf.GradientTape` will not. If you do directly use
`tf.gradients` or `tf.GradientTape`, your model may train to a worse quality.
When eager execution is enabled, the mixed precision graph rewrite is only
enabled within `tf.function`s, as outside `tf.function`s, there is no graph.
When enabled, mixed precision is only used on Volta GPUs and above. The parts
of the graph on CPUs and TPUs are untouched by the graph rewrite.
Args:
opt: An instance of a `tf.keras.optimizers.Optimizer` or a
`tf.train.Optimizer`.
loss_scale: Either an int/float, the string "dynamic", or an instance of a
`tf.train.experimental.LossScale`. The loss scale to use. It is
recommended to keep this as its default value of "dynamic".
Returns:
A version of `opt` that will use loss scaling to prevent underflow.
"""
# TODO(reedwm): If a ConfigProto is passed to Session, either assert that
# auto_mixed_precision is on or turn it on for the user.
if not mixed_precision_global_state.using_default_mixed_precision_policy:
raise ValueError(
'The mixed precision graph rewrite cannot be enabled, because a keras '
'mixed precision Policy has been set. At most, one of the following '
'functions can be called:\n\n'
' 1. tf.keras.mixed_precision.experimental.set_policy() (You called '
'this first)\n'
' 2. tf.train.experimental.enable_mixed_precision_graph_rewrite() '
'(You called this second)\n\n'
'You called both functions, which is an error, because both functions '
'enable you to use mixed precision. The second function enables mixed '
'precision in the graph with a graph rewrite. However it is currently '
'not very customizable, and does not support eager. The first '
'function is for Keras layers, but is not yet fully complete.')
if mixed_precision_global_state.non_mixed_precision_session_created:
    # TODO(reedwm): Give the stacktrace of the existing Sessions. And if the
    # Sessions have already been closed, do not issue this warning.
tf_logging.warn('You already have existing Sessions that do not use mixed '
'precision. enable_mixed_precision_graph_rewrite() will '
'not affect these Sessions.')
opt = _wrap_optimizer(opt, loss_scale)
config.set_optimizer_experimental_options({'auto_mixed_precision': True})
mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled = True
return opt
@tf_export(v1=['train.experimental.disable_mixed_precision_graph_rewrite'])
def disable_mixed_precision_graph_rewrite():
"""Disables the mixed precision graph rewrite.
After this is called, the mixed precision graph rewrite will no longer run for
new Sessions, and so float32 operations will no longer be converted to float16
in such Sessions. However, any existing Sessions will continue to have the
graph rewrite enabled if they were created after
`enable_mixed_precision_graph_rewrite` was called but before
`disable_mixed_precision_graph_rewrite` was called.
This does not undo the effects of loss scaling. Any optimizers wrapped with a
LossScaleOptimizer will continue to do loss scaling, although this loss
scaling will no longer be useful if the optimizer is used in new Sessions, as
the graph rewrite no longer converts the graph to use float16.
  This function is useful for unit testing. A unit test can exercise the mixed
  precision graph rewrite, then disable it so that future unit tests continue
  using float32. If this is done, unit tests should not share a single session,
as `enable_mixed_precision_graph_rewrite` and
`disable_mixed_precision_graph_rewrite` have no effect on existing sessions.
"""
if not mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
tf_logging.warn('disable_mixed_precision_graph_rewrite() called when mixed '
'precision is already disabled.')
config.set_optimizer_experimental_options({'auto_mixed_precision': False})
mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled = False
|
tensorflow-master
|
tensorflow/python/training/experimental/mixed_precision.py
|
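A hedged usage sketch for enable_mixed_precision_graph_rewrite, following the docstring above: gradients must be computed and applied through the returned, wrapped optimizer. The toy model is an assumption; the API names come from this file's tf_export entries.
# Hedged sketch: enable the mixed precision graph rewrite (TF 1.x, graph mode).
import tensorflow as tf
var = tf.Variable([[1.0]])
loss = tf.reduce_sum(tf.matmul(var, var))
opt = tf.train.GradientDescentOptimizer(1.0)
# Wraps opt in a loss-scale optimizer and turns on auto_mixed_precision.
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(
    opt, loss_scale='dynamic')
train_op = opt.minimize(loss)  # compute and apply gradients via the wrapper
with tf.Session() as sess:  # new Sessions now run the grappler rewrite
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)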
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains global variables related to mixed precision.
This is not part of mixed_precision.py to avoid a circular dependency.
mixed_precision.py depends on Session, and Session depends on this file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Whether the mixed precision graph rewrite has been enabled or not with
# `enable_mixed_precision_graph_rewrite`. Used to turn on auto_mixed_precision
# in ConfigProtos passed to Sessions.
mixed_precision_graph_rewrite_is_enabled = False
# True if a Session has been created without the mixed precision graph rewrite
# being enabled. Used to give a warning if mixed precision is enabled after a
# Session has already been created.
non_mixed_precision_session_created = False
# Whether the default tf.keras.mixed_precision.experimental.Policy is in effect.
# Used to raise an error message if both a non-default Policy and the graph
# rewrite are used at the same time.
using_default_mixed_precision_policy = True
|
tensorflow-master
|
tensorflow/python/training/experimental/mixed_precision_global_state.py
|
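A brief, hedged sketch of how code elsewhere consults these module-level flags; the flag names come from this file, while the surrounding control flow is illustrative only.
# Hedged sketch: reading the mixed precision global state flags.
from tensorflow.python.training.experimental import mixed_precision_global_state
if mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled:
  print('auto_mixed_precision will be turned on in ConfigProtos for Sessions')
if not mixed_precision_global_state.using_default_mixed_precision_policy:
  print('a Keras mixed precision Policy is set; the graph rewrite would error')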
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import test_util
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer as loss_scale_optimizer_v2
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import gradient_descent as gradient_descent_v1
from tensorflow.python.training.experimental import loss_scale_optimizer as loss_scale_optimizer_v1
from tensorflow.python.training.experimental import mixed_precision
from tensorflow.python.training.experimental import mixed_precision_global_state
class MixedPrecisionTest(test.TestCase, parameterized.TestCase):
IGNORE_PERF_VAR = 'TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'
def setUp(self):
super(MixedPrecisionTest, self).setUp()
# Enable the tests to be run on pre-Volta GPUs by telling the grappler pass
# to ignore performance and always transform the graph.
self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR)
os.environ[self.IGNORE_PERF_VAR] = '1'
def tearDown(self):
    # Set the IGNORE_PERF_VAR variable back to its original value.
if self._original_ignore_perf_value is not None:
os.environ[self.IGNORE_PERF_VAR] = self._original_ignore_perf_value
else:
del os.environ[self.IGNORE_PERF_VAR]
mixed_precision.disable_mixed_precision_graph_rewrite()
super(MixedPrecisionTest, self).tearDown()
@test_util.run_in_graph_and_eager_modes
def test_wrap_optimizer(self):
opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
opt = mixed_precision.enable_mixed_precision_graph_rewrite(opt, 123.)
self.assertIsInstance(
opt, loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer)
self.assertEqual(self.evaluate(opt._loss_scale()), 123.)
opt = gradient_descent_v2.SGD(1.0)
opt = mixed_precision.enable_mixed_precision_graph_rewrite(opt, 123.)
self.assertIsInstance(
opt, loss_scale_optimizer_v2.LossScaleOptimizer)
self.assertEqual(self.evaluate(opt._loss_scale()), 123.)
@test_util.run_in_graph_and_eager_modes
def test_optimizer_errors(self):
opt = 1
expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
'a tf.keras.optimizers.Optimizer, but got')
with self.assertRaisesRegexp(ValueError, expected_regex):
mixed_precision.enable_mixed_precision_graph_rewrite(opt)
self.assertFalse(config.get_optimizer_experimental_options()
.get('auto_mixed_precision', False))
opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
'dynamic')
with self.assertRaisesRegexp(ValueError,
'"opt" must not already be an instance of a '
'MixedPrecisionLossScaleOptimizer.'):
mixed_precision.enable_mixed_precision_graph_rewrite(opt)
self.assertFalse(config.get_optimizer_experimental_options()
.get('auto_mixed_precision', False))
opt = gradient_descent_v2.SGD(1.0)
opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt, 'dynamic')
with self.assertRaisesRegexp(ValueError,
'"opt" must not already be an instance of a '
'LossScaleOptimizer.'):
mixed_precision.enable_mixed_precision_graph_rewrite(opt)
self.assertFalse(config.get_optimizer_experimental_options()
.get('auto_mixed_precision', False))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def test_grappler_pass_enabled(self):
opt = gradient_descent_v2.SGD(1.0)
mixed_precision.enable_mixed_precision_graph_rewrite(opt, 123.)
var = variables.Variable([[1.0]])
def overflow_in_float16():
out = var * 2 ** 10
out = math_ops.matmul(out, out)
return array_ops.reshape(out, ())
if context.executing_eagerly():
f = def_function.function(overflow_in_float16)
self.assertEqual(f().numpy(), float('Inf'))
# Outside a def_function.function, the grappler pass will not be applied.
self.assertAlmostEqual(overflow_in_float16().numpy(), 2 ** 20)
# Test disabling mixed precision.
mixed_precision.disable_mixed_precision_graph_rewrite()
self.assertEqual(f().numpy(), 2 ** 20)
else:
with session.Session() as sess:
out = overflow_in_float16()
sess.run(var.initializer)
self.assertEqual(sess.run(out), float('Inf'))
      # Test that Session enables the auto_mixed_precision grappler pass when a
      # ConfigProto is passed by the user.
with session.Session(config=config_pb2.ConfigProto()) as sess:
out = overflow_in_float16()
sess.run(var.initializer)
self.assertEqual(sess.run(out), float('Inf'))
# Test disabling mixed precision.
mixed_precision.disable_mixed_precision_graph_rewrite()
with session.Session() as sess:
out = overflow_in_float16()
sess.run(var.initializer)
self.assertAlmostEqual(sess.run(out), 2 ** 20)
@test.mock.patch.object(tf_logging, 'warn')
def test_warn_if_session_already_exists(self, mock_warn):
# Set this to False, so Sessions created in previous tests do not trigger
# the warning.
mixed_precision_global_state.non_mixed_precision_session_created = False
with session.Session():
mixed_precision.enable_mixed_precision_graph_rewrite(
gradient_descent_v2.SGD(1.0))
mock_warn.assert_any_call(
'You already have existing Sessions that do not use mixed precision. '
'enable_mixed_precision_graph_rewrite() will not affect these '
'Sessions.')
@test.mock.patch.object(tf_logging, 'warn')
def test_do_not_warn_if_session_does_not_already_exist(self, mock_warn):
# Set this to False, so Sessions created in previous tests do not trigger
# the warning.
mixed_precision_global_state.non_mixed_precision_session_created = False
mixed_precision.enable_mixed_precision_graph_rewrite(
gradient_descent_v2.SGD(1.0))
with session.Session():
# Make sure the "You already have existing Sessions" warning was not
# issued, since the Session was only created after
# enable_mixed_precision_graph_rewrite.
for call_arg in mock_warn.call_args_list:
msg = call_arg[0][0]
self.assertNotIn('You already have existing Sessions that do not use '
'mixed precision', msg)
def test_error_if_policy_is_set(self):
with policy.policy_scope('infer_float32_vars'):
with self.assertRaisesRegexp(
ValueError, 'a keras mixed precision Policy has been set'):
mixed_precision.enable_mixed_precision_graph_rewrite(
gradient_descent_v2.SGD(1.0))
    # Test that no error is thrown when the policy is currently the default.
mixed_precision.enable_mixed_precision_graph_rewrite(
gradient_descent_v2.SGD(1.0))
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/experimental/mixed_precision_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MixedPrecisionLossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.keras.mixed_precision.experimental import test_util as mp_test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import momentum
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.training.experimental import loss_scale_optimizer
from tensorflow.python.training.tracking import util as trackable_utils
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'Base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'Distribute',
'strategy_fn': create_mirrored_strategy
})
def get_gradients(opt, loss, params):
grads_and_vars = opt.compute_gradients(loss, params)
grads, _ = zip(*grads_and_vars)
return grads
class MixedPrecisionLossScaleOptimizerTest(test.TestCase,
parameterized.TestCase):
def _run_if_in_graph_mode(self, val):
    # Running `val` only in graph mode is useful, because optimizers sometimes
    # return a value that, in graph mode, is runnable with self.evaluate. But in
    # eager mode, the optimizer has already done the computations, so the return
    # value cannot be run.
if not context.executing_eagerly():
self.evaluate(val)
def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(
expected_grad)
loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
return lambda: opt.minimize(loss, var_list=[var])
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testFixedLossScaleAppliedToLossWithMinimize(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([5.0])
opt = gradient_descent.GradientDescentOptimizer(2.0)
loss_scale = 10.
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
# We need num_replicas_in_sync to divide loss_scale, otherwise loss_scale
# / strategy.num_replicas_in_sync will not be exact, which could lead to
# assertion failures due to rounding issues.
self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(
strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
@test_util.deprecated_graph_mode_only
def testFixedLossScaleAppliedToLossWithGetGradients(self):
var = variables.Variable([2.0])
opt = gradient_descent.GradientDescentOptimizer(1.0)
loss_scale = 10.
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(opt, loss_scale)
grad_check_fn = mp_test_util.create_identity_with_grad_check_fn(loss_scale)
loss = grad_check_fn(var)
run_op = get_gradients(opt, loss, [var])
self.evaluate(variables.global_variables_initializer())
# This will cause an assertion to run, as
# mp_test_util.create_identity_with_grad_check_fn added an assertion op.
self.evaluate(run_op)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicLossScale(self, strategy_fn):
strategy = strategy_fn()
learning_rate = 2.
expected_gradient = resource_variable_ops.ResourceVariable(
learning_rate / strategy.num_replicas_in_sync)
with strategy.scope():
var = variables.Variable([5.0])
opt = gradient_descent.GradientDescentOptimizer(learning_rate)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2, increment_period=1, multiplier=2)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
self.assertEqual(
loss_scale.initial_loss_scale % strategy.num_replicas_in_sync, 0)
run_fn = self._run_fn_with_grad_check(strategy, var, opt,
expected_gradient)
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# The loss is the identity of the variable. Therefore the gradient is 1,
# and so the variable will be init_val - grad * lr == 5 - 1 * 2 == 3
self.assertAllClose([3.], self.evaluate(var))
      # The loss scale will have doubled, so the expected gradient is also
      # doubled.
self.evaluate(
expected_gradient.assign(2 * learning_rate /
strategy.num_replicas_in_sync))
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
      # As before, 2 is subtracted from the variable, making its new value 1.
self.assertAllClose([1.], self.evaluate(var))
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicUpdate(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([1.0, 2.0])
opt = gradient_descent.GradientDescentOptimizer(1.0)
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2, increment_period=1, multiplier=2)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
# Test optimizer with finite gradients
loss = lambda: var * 2.0 / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
# Gradient is 2, so variable will have 2 subtracted from it
self.assertAllClose([-1.0, 0.0], self.evaluate(var))
# Loss scale has doubled from 2 to 4
self.assertEqual(4., self.evaluate(opt._loss_scale()))
# Test optimizer with NaN gradients
loss = lambda: var * float('NaN')
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
# Variable should not change from before, due to NaN gradients.
self.assertAllClose(self.evaluate(var), [-1.0, 0.0])
      # The loss scale should halve due to the NaN gradients.
self.assertEqual(2., self.evaluate(opt._loss_scale()))
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testDynamicLossScaleWithSlots(self, strategy_fn):
with strategy_fn().scope() as strategy:
var = variables.Variable([1.0, 2.0])
# An SGD optimizer with momentum has slot variables.
opt = momentum.MomentumOptimizer(1.0, momentum=1.)
initial_loss_scale = 2.
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale,
increment_period=1,
multiplier=4)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
loss = lambda: var / strategy.num_replicas_in_sync
run_fn = lambda: opt.minimize(loss, var_list=[var])
run_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self._run_if_in_graph_mode(run_op)
      # The momentum accumulator starts at 0 and the gradient is 1. The
      # accumulator is incremented by the gradient, so it is now 1. Then the
      # accumulator is subtracted from the variable, so the variable decreases
      # by 1.
self.assertAllClose([0.0, 1.0], self.evaluate(var))
self.assertEqual(self.evaluate(opt._loss_scale()), initial_loss_scale * 4)
run_op = strategy.experimental_run(run_fn)
self._run_if_in_graph_mode(run_op)
      # The momentum accumulator was 1 before this step and the gradient is 1.
      # The accumulator is incremented by the gradient, so it is now 2. Then the
      # accumulator is subtracted from the variable, so the variable decreases
      # by 2.
self.assertAllClose([-2., -1.], self.evaluate(var))
self.assertEqual(
self.evaluate(opt._loss_scale()), initial_loss_scale * 16)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def testCheckpoint(self, strategy_fn):
strategy = strategy_fn()
if (isinstance(strategy, mirrored_strategy.MirroredStrategy) and
not context.executing_eagerly()):
# TODO(b/121381184): Enable running the test in this case.
return
with self.test_session(), strategy.scope():
# Build and run a simple model.
var = variables.Variable([2.0])
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1., increment_period=2., multiplier=2.)
opt = momentum.MomentumOptimizer(1.0, momentum=1.)
opt = loss_scale_optimizer.MixedPrecisionLossScaleOptimizer(
opt, loss_scale)
run_fn = lambda: opt.minimize(lambda: var + 1., var_list=[var])
opt_op = strategy.experimental_run(run_fn)
self.evaluate(variables.global_variables_initializer())
self.evaluate(opt_op)
self.assertEqual(self.evaluate(loss_scale()), 1.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1)
# Save a checkpoint.
checkpoint = trackable_utils.Checkpoint(optimizer=opt)
prefix = os.path.join(self.get_temp_dir(), 'ckpt')
save_path = checkpoint.save(prefix)
# Run model again.
self.evaluate(strategy.experimental_run(run_fn))
self.assertEqual(self.evaluate(loss_scale()), 2.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 0)
      # Load checkpoint and ensure the loss scale is back to its original value.
status = checkpoint.restore(save_path)
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(self.evaluate(loss_scale()), 1.)
self.assertEqual(self.evaluate(loss_scale._num_good_steps), 1)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/experimental/loss_scale_optimizer_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains LossScale classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
@six.add_metaclass(abc.ABCMeta)
@tf_export('train.experimental.LossScale')
class LossScale(trackable.Trackable):
"""Loss scale base class.
Loss scaling is a process that multiplies the loss by a multiplier called the
loss scale, and divides each gradient by the same multiplier. The pseudocode
for this process is:
```
loss = ...
loss *= loss_scale
grads = gradients(loss, vars)
grads /= loss_scale
```
Mathematically, loss scaling has no effect, but can help avoid numerical
underflow in intermediate gradients when float16 tensors are used for mixed
precision training. By multiplying the loss, each intermediate gradient will
have the same multiplier applied.
  Instances of this class represent a loss scale. Calling an instance returns
  the loss scale as a scalar float32 tensor, while the `update()` method
  updates the loss scale depending on the values of the gradients.
  Optimizers use instances of this class to scale loss and gradients.
"""
def __init__(self):
"""Initializes the loss scale class."""
self._weights = {}
@abc.abstractmethod
def __call__(self):
"""Returns the current loss scale as a scalar `float32` tensor."""
pass
@abc.abstractmethod
def update(self, grads):
"""Updates the value of the loss scale.
    The loss scale is potentially updated, based on the value of `grads`. The
    tensor returned by calling an instance of this class is only updated when
    this function is evaluated.
In eager mode, this directly updates the loss scale, so that calling
`__call__` will return the newly updated loss scale. In graph mode,
this returns an op that, when evaluated, updates the loss scale.
    This function also returns a `should_apply_gradients` bool. If False,
    gradients should not be applied to the variables for that step, as nonfinite
    gradients were found, and the loss scale has been updated to reduce the
    chance of finding nonfinite gradients in the next step. Some loss scale
    classes will always return True, as they cannot adjust themselves in
    response to nonfinite gradients.
When a DistributionStrategy is used, this function may only be called in a
cross-replica context.
Args:
      grads: A list of unscaled gradients, each of which is the gradient of the
        loss with respect to a weight. The gradients should have already been
        divided by the loss scale before being passed to this function. 'None'
        gradients are accepted, and are ignored.
Returns:
update_op: In eager mode, None. In graph mode, an op to update the loss
scale.
should_apply_gradients: Either a bool or a scalar boolean tensor. If
False, the caller should skip applying `grads` to the variables this
step.
"""
pass
def _add_weight(self, name, initial_value, dtype=None):
"""Adds a weight to this loss scale.
Args:
name: Variable name.
initial_value: The variable's initial value.
dtype: The type of the variable.
Returns:
A variable.
Raises:
RuntimeError: If a weight with `name` has already been added.
"""
variable = variable_scope.variable(
initial_value=initial_value,
name=name,
dtype=dtype,
trainable=False,
use_resource=True,
synchronization=variables.VariableSynchronization.AUTO,
# Set aggregation to NONE, as loss scaling variables should never be
# aggregated.
aggregation=variables.VariableAggregation.NONE)
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
key = (name, graph_key)
if self._weights.get(key, None) is not None:
raise RuntimeError('Duplicate variables detected. {}'.format(key))
self._weights[key] = variable
self._handle_deferred_dependencies(name=name, trackable=variable)
return variable
@property
def _checkpoint_dependencies(self):
"""From Trackable. Gather graph-specific weights to save."""
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
weights = []
for (name, g), v in sorted(self._weights.items(), key=lambda i: i[0][0]):
if g == graph_key:
weights.append(trackable.TrackableReference(name=name, ref=v))
return super(LossScale, self)._checkpoint_dependencies + weights
def _lookup_dependency(self, name):
"""From Trackable. Find a weight in the current graph."""
unconditional = super(LossScale, self)._lookup_dependency(name)
if unconditional is not None:
return unconditional
if context.executing_eagerly():
graph_key = None
else:
graph = ops.get_default_graph()
graph_key = graph._graph_key # pylint: disable=protected-access
return self._weights.get((name, graph_key), None)
@abc.abstractmethod
def get_config(self):
"""Returns the config of this loss scale."""
pass
@classmethod
def from_config(cls, config):
"""Creates the LossScale from its config."""
return cls(**config)
def get_loss_scale_weights(loss_scale):
return loss_scale._weights.values() # pylint: disable=protected-access
@tf_export('train.experimental.FixedLossScale')
class FixedLossScale(LossScale):
"""Loss scale with a fixed value.
The loss scale is not updated for the lifetime of instances of this class.
A given instance of this class always returns the same number when called.
"""
def __init__(self, loss_scale_value):
"""Creates the fixed loss scale.
Args:
      loss_scale_value: A Python float. Its ideal value varies from model to
        model. A loss scale that is too small may degrade model quality; one
        that is too large may cause inf or nan values. There is no single right
        loss scale to apply; a relatively large value is harmless as long as no
        nan or inf is encountered in training.
Raises:
      ValueError: If loss_scale_value is less than 1.
"""
super(FixedLossScale, self).__init__()
if not isinstance(loss_scale_value, six.integer_types + (float,)):
raise ValueError('loss_scale_value must be a Python int or float.')
if loss_scale_value < 1:
raise ValueError('loss_scale_value must be at least 1.')
    # It's important that we do not create tensors in the constructor, as such
    # tensors might end up on a different device, or in a different tf.function,
    # than where the tensor is used. This would hurt performance. Therefore, we
    # do not create a tensor from loss_scale_value, but instead leave it as a
    # Python float.
# TODO(reedwm): Also do not create tensors in the DynamicLossScale
# constructor.
self._loss_scale_value = float(loss_scale_value)
def __call__(self):
return ops.convert_to_tensor(self._loss_scale_value)
def update(self, grads):
del grads
return control_flow_ops.no_op(), True
def get_config(self):
return {'loss_scale_value': self._loss_scale_value}
def _is_all_finite(grads):
"""Returns a scalar boolean tensor indicating if all gradients are finite."""
is_finite_per_grad = [
math_ops.reduce_all(math_ops.is_finite(g)) for g in grads if g is not None
]
return math_ops.reduce_all(is_finite_per_grad)
def _op_in_graph_mode(tensor):
"""Returns the tensor's op in graph mode, or the tensor in eager mode.
This is useful because sometimes an op is needed in graph mode instead of a
tensor. In eager mode, there are no ops.
Args:
tensor: A tensor.
Returns:
    The tensor's op in graph mode, or the tensor itself in eager mode.
"""
if context.executing_eagerly():
return tensor
return tensor.op
def _assign_if_finite(var, value):
"""Assigns a value to a variable if the value is finite."""
return control_flow_ops.cond(
math_ops.is_finite(value), lambda: _op_in_graph_mode(var.assign(value)),
control_flow_ops.no_op)
@tf_export('train.experimental.DynamicLossScale')
class DynamicLossScale(LossScale):
"""Loss scale that dynamically adjusts itself.
Dynamic loss scaling works by adjusting the loss scale as training progresses.
The goal is to keep the loss scale as high as possible without overflowing the
gradients. As long as the gradients do not overflow, raising the loss scale
never hurts.
The algorithm starts by setting the loss scale to an initial value. Every N
steps that the gradients are finite, the loss scale is increased by some
factor. However, if a NaN or Inf gradient is found, the gradients for that
step are not applied, and the loss scale is decreased by the factor. This
process tends to keep the loss scale as high as possible without gradients
overflowing.
"""
def __init__(self,
initial_loss_scale=2 ** 15, # See docstring for why this is big.
increment_period=2000,
multiplier=2.):
"""Creates the dynamic loss scale.
Args:
initial_loss_scale: A Python float. The loss scale to use at the
beginning. It's better to start this at a very high number, because a
loss scale that is too high gets lowered far more quickly than a loss
scale that is too low gets raised. The default is 2 ** 15, which is
approximately half the maximum float16 value.
      increment_period: Increases the loss scale every `increment_period`
        consecutive steps in which finite gradients are encountered. If a
        nonfinite gradient is encountered, the count is reset back to zero.
multiplier: The multiplier to use when increasing or decreasing the loss
scale.
"""
super(DynamicLossScale, self).__init__()
self._initial_loss_scale = float(initial_loss_scale)
self._increment_period = int(increment_period)
self._multiplier = float(multiplier)
self._current_loss_scale = self._add_weight(
name='current_loss_scale',
dtype=dtypes.float32,
initial_value=self._initial_loss_scale)
# The number of consecutive steps with finite gradients since the last
# nonfinite gradient or change in loss scale.
self._num_good_steps = self._add_weight(
name='good_steps', dtype=dtypes.int64, initial_value=0)
@property
def initial_loss_scale(self):
return self._initial_loss_scale
@property
def increment_period(self):
return self._increment_period
@property
def multiplier(self):
return self._multiplier
def __call__(self):
return self._current_loss_scale
def update(self, grads):
"""Updates loss scale based on if gradients are finite in current step."""
if distribution_strategy_context.has_strategy():
distribution = distribution_strategy_context.get_cross_replica_context()
def get_is_finite(grads):
is_finite = _is_all_finite(grads)
# We cast to float, because we cannot reduce booleans with
# DistributionStrategy.
return math_ops.cast(is_finite, dtypes.float32)
is_finite_float = distribution.extended.call_for_each_replica(
get_is_finite, args=(grads,))
reduced_is_finite_float = distribution.reduce(reduce_util.ReduceOp.SUM,
is_finite_float, axis=None)
is_finite = math_ops.equal(reduced_is_finite_float,
distribution.num_replicas_in_sync)
else:
is_finite = _is_all_finite(grads)
def update_if_finite_grads():
"""Update assuming the gradients are finite."""
def incr_loss_scale():
new_loss_scale = self._current_loss_scale * self._multiplier
return control_flow_ops.group(
_assign_if_finite(self._current_loss_scale, new_loss_scale),
self._num_good_steps.assign(0))
return control_flow_ops.cond(
self._num_good_steps + 1 >= self._increment_period,
incr_loss_scale, lambda: _op_in_graph_mode(
self._num_good_steps.assign_add(1)))
def update_if_not_finite_grads():
"""Update assuming the gradients are nonfinite."""
new_loss_scale = math_ops.maximum(
self._current_loss_scale / self._multiplier, 1)
return control_flow_ops.group(
self._num_good_steps.assign(0),
self._current_loss_scale.assign(new_loss_scale))
update_op = control_flow_ops.cond(is_finite, update_if_finite_grads,
update_if_not_finite_grads)
should_apply_gradients = is_finite
return update_op, should_apply_gradients
def get_config(self):
return {
'initial_loss_scale': self.initial_loss_scale,
'increment_period': self.increment_period,
'multiplier': self.multiplier,
}
def get(identifier):
"""Get a loss scale object."""
if isinstance(identifier, six.integer_types + (float,)):
return FixedLossScale(identifier)
if identifier == 'dynamic':
return DynamicLossScale()
if isinstance(identifier, LossScale):
return identifier
elif identifier is None:
return None
else:
raise ValueError('Could not interpret loss scale identifier: %s' %
identifier)
|
tensorflow-master
|
tensorflow/python/training/experimental/loss_scale.py
|
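A hedged sketch of the manual loss-scaling protocol described in the LossScale docstring above: multiply the loss by the scale, divide the gradients by the same scale, then call update() and apply the step only when should_apply is True. Eager execution and the toy model are assumptions; the class and method names come from this file.
# Hedged sketch: manual loss scaling with DynamicLossScale (TF 1.x, eager).
import tensorflow as tf
tf.enable_eager_execution()
loss_scale = tf.train.experimental.DynamicLossScale(
    initial_loss_scale=2 ** 15, increment_period=2000, multiplier=2.)
var = tf.Variable(1.0)
with tf.GradientTape() as tape:
  loss = var * var
  scaled_loss = loss * loss_scale()  # scale the loss up
scaled_grads = tape.gradient(scaled_loss, [var])
grads = [g / loss_scale() for g in scaled_grads]  # scale the gradients down
update_op, should_apply = loss_scale.update(grads)  # update_op is None in eager
if should_apply:  # False when nonfinite gradients were found this step
  var.assign_sub(0.1 * grads[0])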
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScale classes.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
# TODO(reedwm): Create test case using multiple graphs
# If called outside any strategy.scope() calls, this will return the default
# strategy.
default_strategy_fn = distribution_strategy_context.get_strategy
def create_mirrored_strategy():
if context.num_gpus() >= 1:
return mirrored_strategy.MirroredStrategy(['cpu:0', 'gpu:0'])
else:
return mirrored_strategy.MirroredStrategy(['cpu:0'])
TESTCASES = ({
'testcase_name': 'base',
'strategy_fn': default_strategy_fn
}, {
'testcase_name': 'distribute',
'strategy_fn': create_mirrored_strategy
})
class FixedLossScaleTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_basic(self):
loss_scale_value = 1000
loss_scale = loss_scale_module.FixedLossScale(loss_scale_value)
update_op, should_apply = loss_scale.update([constant_op.constant(0.)])
self.evaluate(update_op)
# should_apply should be a bool instead of a tensor, so that a tf.cond does
# not have to be built in the graph by the caller.
self.assertIsInstance(should_apply, bool)
self.assertTrue(should_apply)
self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))
update_op, should_apply = loss_scale.update(
[constant_op.constant(float('NaN'))])
self.evaluate(update_op)
self.assertIsInstance(should_apply, bool)
self.assertTrue(should_apply)
self.assertEqual(loss_scale_value, self.evaluate(loss_scale()))
@test_util.run_in_graph_and_eager_modes
def test_serialization(self):
loss_scale = loss_scale_module.get(123)
config = loss_scale.get_config()
loss_scale = loss_scale_module.FixedLossScale.from_config(config)
self.assertEqual(self.evaluate(loss_scale()), 123.)
def _get_example_iter(inputs):
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
return dataset_ops.make_one_shot_iterator(dataset)
class DynamicLossScaleTest(test.TestCase, parameterized.TestCase):
def _get_tensor(self, is_finite):
tensor = control_flow_ops.cond(is_finite, lambda: 1., lambda: float('NaN'))
if not distribution_strategy_context.has_strategy():
return tensor
def get():
rep_id = (
distribution_strategy_context.get_replica_context()
.replica_id_in_sync_group)
return control_flow_ops.cond(
math_ops.equal(rep_id, 0), lambda: tensor, lambda: 1.)
distribution = distribution_strategy_context.get_strategy()
return distribution.extended.call_for_each_replica(get)
def _test_helper(self,
inputs,
expected_outputs,
initial_loss_scale=1.,
increment_period=2,
multiplier=2):
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=initial_loss_scale,
increment_period=increment_period,
multiplier=multiplier)
itr = _get_example_iter(inputs)
def update():
is_finite = itr.get_next()
grad = self._get_tensor(is_finite)
update_op, should_apply_gradients = loss_scale.update([grad])
assert_op = check_ops.assert_equal(should_apply_gradients, is_finite)
if context.executing_eagerly():
return
with ops.control_dependencies([assert_op]):
return array_ops.identity(update_op)
actual_outputs = []
if not context.executing_eagerly():
update_op = update()
self.evaluate(variables.global_variables_initializer())
for _ in range(len(inputs)):
if context.executing_eagerly():
update()
else:
self.evaluate(update_op)
actual_outputs.append(self.evaluate(loss_scale()))
self.assertEqual(actual_outputs, expected_outputs)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_increase(self, strategy_fn):
with strategy_fn().scope():
inputs = [True] * 6
expected_outputs = [1, 2, 2, 4, 4, 8]
self._test_helper(inputs, expected_outputs)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_keep_increasing_until_capped(self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = np.finfo(np.float32).max / 4
max_float = np.finfo(np.float32).max
inputs = [True] * 6
# Output is capped the 2nd time it doubles.
expected_outputs = [
init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
max_float, max_float
]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_decrease_every_step(self, strategy_fn):
with strategy_fn().scope():
inputs = [False] * 6
init_loss_scale = 1024
expected_outputs = [512, 256, 128, 64, 32, 16]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_keep_decreasing_until_one(self, strategy_fn):
with strategy_fn().scope():
inputs = [False] * 6
init_loss_scale = 16
expected_outputs = [8, 4, 2, 1, 1, 1]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_nan_clear_good_step(self, strategy_fn):
with strategy_fn().scope():
inputs = [True, True, True, False, True]
expected_outputs = [1, 2, 2, 1, 1]
self._test_helper(inputs, expected_outputs)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_trigger_loss_scale_update_each_step(self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = 1
increment_period = 1
inputs = [True] * 3 + [False, True, True]
expected_outputs = [2, 4, 8, 4, 8, 16]
self._test_helper(inputs, expected_outputs, init_loss_scale,
increment_period)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_each_step(
self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = 1
increment_period = 1
inputs = [True, False] * 4 + [True]
expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
self._test_helper(inputs, expected_outputs, init_loss_scale,
increment_period)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_every_other_step(
self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = 32
increment_period = 2
inputs = [True, False] * 3 + [True]
expected_outputs = [32, 16, 16, 8, 8, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale,
increment_period)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_nondefault_multiplier(self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = 4
multiplier = 3
inputs = [True, True, False, True, True]
expected_outputs = [4, 12, 4, 4, 12]
self._test_helper(
inputs, expected_outputs, init_loss_scale, multiplier=multiplier)
@parameterized.named_parameters(*TESTCASES)
@test_util.run_in_graph_and_eager_modes
def test_random_mix_good_and_bad_gradients(self, strategy_fn):
with strategy_fn().scope():
init_loss_scale = 4
inputs = [
False, True, True, True, False, True, False, True, True, True, False
]
expected_outputs = [2, 2, 4, 4, 2, 2, 1, 1, 2, 2, 1]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_serialization(self):
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=1, increment_period=2, multiplier=3)
config = loss_scale.get_config()
loss_scale = loss_scale_module.DynamicLossScale.from_config(config)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(loss_scale()), 1)
self.assertEqual(loss_scale.increment_period, 2)
self.assertEqual(loss_scale.multiplier, 3)
@test_util.run_in_graph_and_eager_modes
def test_update_with_none_gradients(self):
loss_scale = loss_scale_module.DynamicLossScale()
loss_scale.update([None])
@test_util.run_in_graph_and_eager_modes
def test_get(self):
scalar = loss_scale_module.get('dynamic')
scalar2 = loss_scale_module.DynamicLossScale()
self.assertEqual(scalar.initial_loss_scale, scalar2.initial_loss_scale)
self.assertEqual(scalar.increment_period, scalar2.increment_period)
self.assertEqual(scalar.multiplier, scalar2.multiplier)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/training/experimental/loss_scale_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains LossScale classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['train.experimental.MixedPrecisionLossScaleOptimizer'])
class MixedPrecisionLossScaleOptimizer(optimizer.Optimizer):
"""An optimizer that applies loss scaling.
Loss scaling is a process that multiplies the loss by a multiplier called the
loss scale, and divides each gradient by the same multiplier. The pseudocode
for this process is:
```
loss = ...
loss *= loss_scale
grads = gradients(loss, vars)
grads /= loss_scale
```
Mathematically, loss scaling has no effect, but can help avoid numerical
underflow in intermediate gradients when float16 tensors are used for mixed
precision training. By multiplying the loss, each intermediate gradient will
have the same multiplier applied.
  The loss scale can either be a fixed constant, chosen by the user, or be
  dynamically determined. Dynamically determining the loss scale is convenient,
  as a loss scale then does not have to be explicitly chosen. However, it
  reduces performance.
This optimizer wraps another optimizer and applies loss scaling to it via a
`LossScale`. Loss scaling is applied whenever gradients are
computed, such as through `minimize()`.
"""
def __init__(self, opt, loss_scale):
if not isinstance(opt, optimizer.Optimizer):
raise ValueError('"opt" must be an instance of Optimizer, but got: %s' %
type(opt))
self._optimizer = opt
use_locking = opt._use_locking # pylint: disable=protected-access
name = opt.get_name()
super(MixedPrecisionLossScaleOptimizer, self).__init__(use_locking, name)
self._loss_scale = loss_scale_module.get(loss_scale)
self._track_trackable(self._optimizer, 'base_optimizer')
self._track_trackable(self._loss_scale, 'loss_scale')
def _doing_dynamic_loss_scaling(self):
"""Check if `_loss_scale` dynamically manages the loss scale."""
return isinstance(self._loss_scale, loss_scale_module.DynamicLossScale)
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
    This adjusts the dynamic range of the gradient evaluation by scaling up
    the `loss` value. The gradient values are then scaled back down by the
    reciprocal of the loss scale. This is useful in reduced-precision training,
    where small gradient values would otherwise underflow the representable
    range.
Args:
loss: A Tensor containing the value to minimize or a callable taking no
arguments which returns the value to minimize. When eager execution is
enabled it must be a callable.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph under
the key `GraphKeys.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
"""
loss = self._scale_loss(loss)
grads_and_vars = self._optimizer.compute_gradients(
loss=loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
grads = [g for g, _ in grads_and_vars]
variables = [v for _, v in grads_and_vars]
unscaled_grads = self._unscale_grads(grads)
return list(zip(unscaled_grads, variables))
def _scale_loss(self, loss):
loss_scale = self._loss_scale()
if callable(loss):
return lambda: loss() * loss_scale
return loss * loss_scale
  def _unscale_grads(self, grads):
    loss_scale = self._loss_scale()
    loss_scale_reciprocal = 1 / loss_scale
    return [
        None if g is None else self._scale_grad(g, loss_scale_reciprocal)
        for g in grads
    ]
  def _scale_grad(self, grad, loss_scale_reciprocal):
    if isinstance(grad, ops.IndexedSlices):
      grad_vals = grad.values * loss_scale_reciprocal
      return ops.IndexedSlices(grad_vals, grad.indices, grad.dense_shape)
    return grad * loss_scale_reciprocal
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This is the second part of `minimize()`. It returns an `Operation` that
conditionally applies gradients if all gradient values are finite.
Otherwise no update is performed (nor is `global_step` incremented).
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that conditionally applies the specified gradients. If
`global_step` was not None, that operation also increments `global_step`.
    Raises:
      ValueError: If called in a cross-replica context; use
        `_distributed_apply()` instead.
"""
if distribution_strategy_context.in_cross_replica_context():
raise ValueError('apply_gradients() must be called in a replica context.')
if not self._doing_dynamic_loss_scaling():
return self._optimizer.apply_gradients(grads_and_vars, global_step, name)
replica_context = distribution_strategy_context.get_replica_context()
grads_and_vars = tuple(grads_and_vars)
# TODO(nluehr) cleanup GraphKeys.TRAIN_OP
return replica_context.merge_call(
self._distributed_apply, args=(grads_and_vars, global_step, name))
def _distributed_apply(self,
distribution,
grads_and_vars,
global_step=None,
name=None):
"""A version of `apply_gradients` for cross replica context.
When users are in a cross replica strategy, they must call this rather than
`apply_gradients()`.
Args:
distribution: a `DistributionStrategy` object.
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()` and then aggregated across replicas.
global_step: Optional (mirrored) `Variable` to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the name passed
to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients across all
replicas. If `global_step` was not None, that operation also
      increments `global_step`.
"""
name = name if name is not None else self.get_name()
grads = [g for g, _ in grads_and_vars]
loss_scale_update_op, should_apply_grads = (self._loss_scale.update(grads))
def apply_fn():
return self._apply_gradients(distribution, grads_and_vars, global_step,
name + '-wrapped')
maybe_apply_op = smart_cond.smart_cond(should_apply_grads, apply_fn,
control_flow_ops.no_op)
return control_flow_ops.group(
maybe_apply_op, loss_scale_update_op, name=name)
def _apply_gradients(self, distribution, grads_and_vars, global_step, name):
"""Unconditionally apply gradients in cross replica context."""
update_ops = distribution.extended.call_for_each_replica(
self._optimizer.apply_gradients,
args=(grads_and_vars, global_step, name))
return distribution.group(update_ops)
def _apply_sparse(self, grad, var):
"""This function should never be called."""
raise RuntimeError('This function should never be called')
def _apply_dense(self, grad, var):
"""This function should never be called."""
raise RuntimeError('This function should never be called')
def _resource_apply_sparse(self, grad, handle, indices):
"""This function should never be called."""
raise RuntimeError('This function should never be called')
def _resource_apply_dense(self, grad, handle):
"""This function should never be called."""
raise RuntimeError('This function should never be called')
|
tensorflow-master
|
tensorflow/python/training/experimental/loss_scale_optimizer.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import util
class InterfaceTests(test.TestCase):
def testOverwrite(self):
root = base.Trackable()
leaf = base.Trackable()
root._track_trackable(leaf, name="leaf")
(current_name, current_dependency), = root._checkpoint_dependencies
self.assertIs(leaf, current_dependency)
self.assertEqual("leaf", current_name)
duplicate_name_dep = base.Trackable()
with self.assertRaises(ValueError):
root._track_trackable(duplicate_name_dep, name="leaf")
root._track_trackable(duplicate_name_dep, name="leaf", overwrite=True)
(current_name, current_dependency), = root._checkpoint_dependencies
self.assertIs(duplicate_name_dep, current_dependency)
self.assertEqual("leaf", current_name)
def testAddVariableOverwrite(self):
root = base.Trackable()
a = root._add_variable_with_custom_getter(
name="v", shape=[], getter=variable_scope.get_variable)
self.assertEqual([root, a], util.list_objects(root))
with ops.Graph().as_default():
b = root._add_variable_with_custom_getter(
name="v", shape=[], overwrite=True,
getter=variable_scope.get_variable)
self.assertEqual([root, b], util.list_objects(root))
with ops.Graph().as_default():
with self.assertRaisesRegexp(
ValueError, "already declared as a dependency"):
root._add_variable_with_custom_getter(
name="v", shape=[], overwrite=False,
getter=variable_scope.get_variable)
def testAssertConsumedWithUnusedPythonState(self):
has_config = base.Trackable()
has_config.get_config = lambda: {}
saved = util.Checkpoint(obj=has_config)
save_path = saved.save(os.path.join(self.get_temp_dir(), "ckpt"))
restored = util.Checkpoint(obj=base.Trackable())
restored.restore(save_path).assert_consumed()
def testAssertConsumedFailsWithUsedPythonState(self):
has_config = base.Trackable()
attributes = {
"foo_attr": functools.partial(
base.PythonStringStateSaveable,
state_callback=lambda: "",
restore_callback=lambda x: None)}
has_config._gather_saveables_for_checkpoint = lambda: attributes
saved = util.Checkpoint(obj=has_config)
save_path = saved.save(os.path.join(self.get_temp_dir(), "ckpt"))
restored = util.Checkpoint(obj=base.Trackable())
status = restored.restore(save_path)
with self.assertRaisesRegexp(AssertionError, "foo_attr"):
status.assert_consumed()
def testBuggyGetConfig(self):
class NotSerializable(object):
pass
class GetConfigRaisesError(base.Trackable):
def get_config(self):
return NotSerializable()
util.Checkpoint(obj=GetConfigRaisesError()).save(
os.path.join(self.get_temp_dir(), "ckpt"))
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/base_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to layer/model functionality."""
# TODO(b/110718070): Move these functions back to tensorflow/python/keras/utils
# once __init__ files no longer require all of tf.keras to be imported together.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.training.tracking import object_identity
def is_layer(obj):
"""Implicit check for Layer-like objects."""
# TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
return hasattr(obj, "_is_layer") and not isinstance(obj, type)
def has_weights(obj):
"""Implicit check for Layer-like objects."""
# TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).
has_weight = (hasattr(type(obj), "trainable_weights")
and hasattr(type(obj), "non_trainable_weights"))
return has_weight and not isinstance(obj, type)
def filter_empty_layer_containers(layer_list):
"""Filter out empty Layer-like containers and uniquify."""
# TODO(b/130381733): Make this an attribute in base_layer.Layer.
existing = object_identity.ObjectIdentitySet()
to_visit = layer_list[::-1]
filtered = []
while to_visit:
obj = to_visit.pop()
if obj in existing:
continue
existing.add(obj)
if is_layer(obj):
filtered.append(obj)
elif hasattr(obj, "layers"):
# Trackable data structures will not show up in ".layers" lists, but
# the layers they contain will.
to_visit.extend(obj.layers[::-1])
return filtered
def gather_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected trainable weights/variables.
"""
if not trainable:
return []
weights = []
for layer in sub_layers:
weights += layer.trainable_weights
trainable_extra_variables = [
v for v in extra_variables if v.trainable]
return weights + trainable_extra_variables
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the non-trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected non-trainable weights/variables.
"""
trainable_extra_variables = []
non_trainable_extra_variables = []
for v in extra_variables:
if v.trainable:
trainable_extra_variables.append(v)
else:
non_trainable_extra_variables.append(v)
weights = []
for layer in sub_layers:
weights += layer.non_trainable_weights
if not trainable:
trainable_weights = []
for layer in sub_layers:
trainable_weights += layer.trainable_weights
return (trainable_weights + trainable_extra_variables
+ weights + non_trainable_extra_variables)
return weights + non_trainable_extra_variables
|
tensorflow-master
|
tensorflow/python/training/tracking/layer_utils.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
class Subclassed(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(Subclassed, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class CheckpointingTests(xla_test.XLATestCase):
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
for training_continuation in range(3):
with self.test_scope():
model = Subclassed()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=2)
root.restore(manager.latest_checkpoint)
for _ in range(num_training_steps):
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy())
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/util_xla_test.py
|
"""Utilities for saving/loading Trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import weakref
import six
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as v1_saver_lib
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import graph_view as graph_view_lib
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency.
keras_backend = lazy_loader.LazyLoader(
"keras_backend", globals(),
"tensorflow.python.keras.backend")
def get_session():
# Prefer TF's default session since get_session from Keras has side-effects.
session = ops.get_default_session()
if session is None:
session = keras_backend.get_session()
return session
class _ObjectGraphProtoPrettyPrinter(object):
"""Lazily traverses an object graph proto to pretty print names.
  If no calls to `node_names` are made, this object has no performance
  overhead. Conversely, the object graph is traversed at most once, so
  repeated name lookups are cheap after the first.
"""
def __init__(self, object_graph_proto):
self._object_graph_proto = object_graph_proto
self._node_name_cache = None
@property
def node_names(self):
"""Lazily creates a mapping from node id to ("path", "to", "root")."""
if self._node_name_cache is not None:
return self._node_name_cache
path_to_root = object_identity.ObjectIdentityDictionary()
path_to_root[0] = ("(root)",)
to_visit = collections.deque([0])
while to_visit:
node_id = to_visit.popleft()
obj = self._object_graph_proto.nodes[node_id]
for child in obj.children:
if child.node_id not in path_to_root:
path_to_root[child.node_id] = (
path_to_root[node_id] + (child.local_name,))
to_visit.append(child.node_id)
node_names = {}
    for node_id, path in path_to_root.items():
      node_names[node_id] = ".".join(path)
for node_id, node in enumerate(self._object_graph_proto.nodes):
for slot_reference in node.slot_variables:
node_names[slot_reference.slot_variable_node_id] = (
"{}'s state '{}' for {}".format(
node_names[node_id],
slot_reference.slot_name,
node_names[slot_reference.original_variable_node_id]))
self._node_name_cache = node_names
return node_names
class _CheckpointRestoreCoordinator(object):
"""Holds the status of an object-based checkpoint load."""
def __init__(self, object_graph_proto, save_path, save_path_tensor,
restore_op_cache, graph_view):
"""Specify the checkpoint being loaded.
Args:
object_graph_proto: The TrackableObjectGraph protocol buffer associated
with this checkpoint.
save_path: A string, the path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
save_path_tensor: A string `Tensor` which contains or will be fed the save
path.
restore_op_cache: A dictionary shared between
`_CheckpointRestoreCoordinator`s for the same Python objects, used to
look up restore ops by name to avoid re-creating them across multiple
`restore()` calls.
graph_view: A graph_view_lib.ObjectGraphView object for the restored
objects.
"""
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
# Maps from proto ids to lists of attributes which were in the checkpoint
# but not loaded into any object, for error checking.
self.unused_attributes = {}
# Dictionary mapping from an id in the protocol buffer flat array to
# Trackable Python objects. This mapping may be deferred if a
# checkpoint is restored before all dependencies have been tracked. Uses
# weak references so that partial restorations don't create reference cycles
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
self.matched_proto_ids = set()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = object_identity.ObjectIdentityWeakSet()
self.save_path_tensor = save_path_tensor
self.save_path_string = save_path
self.dtype_map = pywrap_tensorflow.NewCheckpointReader(
save_path).get_variable_to_dtype_map()
    # (A NewCheckpointReader is also created lazily in `restore_saveables` for
    # streaming Python state restoration.)
# When graph building, contains a list of ops to run to restore objects from
# this checkpoint.
self.restore_ops = []
self.restore_ops_by_name = restore_op_cache
self.graph_view = graph_view
self.new_restore_ops_callback = None
# A mapping from optimizer proto ids to lists of slot variables to be
# restored when the optimizer is tracked. Only includes slot variables whose
# regular variables have already been created, and only for optimizer
# objects which have not yet been created/tracked.
self.deferred_slot_restorations = {}
# A mapping from variable proto ids to lists of slot variables to be
# restored when the variable is created/tracked. These get shifted over to
# deferred_slot_restorations if the optimizer hasn't been created when that
# happens.
self.slot_restorations = {}
# Controls whether errors are printed in __del__ if some objects did not
# match.
self.expect_partial = False
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot variables.
self.slot_restorations.setdefault(
slot_reference.original_variable_node_id, []).append(
base._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name))
def new_restore_ops(self, new_ops):
self.restore_ops.extend(new_ops)
if self.new_restore_ops_callback:
self.new_restore_ops_callback(new_ops) # pylint: disable=not-callable
def restore_saveables(self, tensor_saveables, python_saveables):
"""Run or build restore operations for SaveableObjects.
Args:
tensor_saveables: `SaveableObject`s which correspond to Tensors.
python_saveables: `PythonStateSaveable`s which correspond to Python
values.
Returns:
When graph building, a list of restore operations, either cached or newly
created, to restore `tensor_saveables`.
"""
restore_ops = []
# Eagerly run restorations for Python state.
reader = None
for saveable in python_saveables:
if reader is None:
# Lazily create the NewCheckpointReader, since this requires file access
# and we may not have any Python saveables.
reader = pywrap_tensorflow.NewCheckpointReader(self.save_path_string)
spec_names = [spec.name for spec in saveable.specs]
saveable.python_restore([reader.get_tensor(name) for name in spec_names])
# If we have new SaveableObjects, extract and cache restore ops.
if tensor_saveables:
validated_saveables = saveable_object_util.validate_and_slice_inputs(
tensor_saveables)
validated_names = set(saveable.name for saveable in validated_saveables)
if set(tensor_saveables.keys()) != validated_names:
raise AssertionError(
("Saveable keys changed when validating. Got back %s, was "
"expecting %s") % (tensor_saveables.keys(), validated_names))
new_restore_ops = functional_saver.MultiDeviceSaver(
validated_saveables).restore(self.save_path_tensor)
if not context.executing_eagerly():
for name, restore_op in sorted(new_restore_ops.items()):
restore_ops.append(restore_op)
assert name not in self.restore_ops_by_name
self.restore_ops_by_name[name] = restore_op
return restore_ops
def __del__(self):
if self.expect_partial:
return
if logging is None:
# The logging module may have been unloaded when __del__ is called.
log_fn = print
else:
log_fn = logging.warning
printed_warning = False
pretty_printer = _ObjectGraphProtoPrettyPrinter(self.object_graph_proto)
for node_id in range(len(self.object_graph_proto.nodes)):
if node_id not in self.matched_proto_ids:
log_fn("Unresolved object in checkpoint: {}"
.format(pretty_printer.node_names[node_id]))
printed_warning = True
for node_id, attribute_name in self.unused_attributes.items():
log_fn(("Unused attribute in object {}: {}"
.format(pretty_printer.node_names[node_id], attribute_name)))
printed_warning = True
if printed_warning:
log_fn(
"A checkpoint was restored (e.g. tf.train.Checkpoint.restore or "
"tf.keras.Model.load_weights) but not all checkpointed values were "
"used. See above for specific issues. Use expect_partial() on the "
"load status object, e.g. "
"tf.train.Checkpoint.restore(...).expect_partial(), to silence these "
"warnings, or use assert_consumed() to make the check explicit. See "
"https://www.tensorflow.org/alpha/guide/checkpoints#loading_mechanics"
" for details.")
class _NameBasedRestoreCoordinator(object):
"""Keeps the status of a name-based checkpoint restore."""
def __init__(self, save_path, dtype_map=None):
self.save_path = save_path
self.dtype_map = dtype_map
self.unused_attributes = weakref.WeakKeyDictionary()
self.restore_uid = ops.uid()
def globally_named_object_attributes(self, trackable):
"""Create globally named SaveableObjects from attributes.
If an object's attribute has no global name specified (default construction
for the SaveableObject factory), records the failure in
`self.unused_attributes` (which can then be used to make status assertions
fail; see `NameBasedSaverStatus`).
Args:
trackable: An object to save.
Yields:
SaveableObjects for `trackable`'s attributes.
"""
for attribute_name, saveable_factory in (
trackable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
      if callable(saveable_factory):
        try:
          saveable = saveable_factory()
        except TypeError:
          # This saveable object factory requires a name= argument with no
          # default, which means there's no way to save/restore it using a
          # name-based checkpoint. Construct it anyway (with a placeholder
          # name) to check whether restoring it is optional; if it is not,
          # record the failure so that assert_consumed() fails.
          if not saveable_factory("").optional_restore:
            self.unused_attributes.setdefault(trackable,
                                              []).append(attribute_name)
          continue
else:
saveable = saveable_factory
names_to_saveables = saveable_object_util.op_list_to_dict(
[saveable], convert_variable_to_tensor=False)
for name, op in names_to_saveables.items():
for saveable_object in saveable_object_util.saveable_objects_for_op(
op=op, name=name):
yield saveable_object
def eager_restore(self, trackable):
"""Runs restore ops for `trackable`'s attributes."""
# When graph building, we don't add any restore ops to the graph until
# run_restore_ops/initialize_or_restore on the status object for name-based
# checkpoints.
assert context.executing_eagerly()
for saveable in self.globally_named_object_attributes(trackable):
restored_tensors = []
tensor_missing = False
for spec in saveable.specs:
if spec.name in self.dtype_map:
with ops.device("cpu:0"):
restored, = io_ops.restore_v2(
prefix=self.save_path,
tensor_names=[spec.name],
shape_and_slices=[""],
dtypes=[self.dtype_map[spec.name]],
name="%s_checkpoint_read" % (spec.name,))
restored_tensors.append(array_ops.identity(restored))
else:
tensor_missing = True
if tensor_missing:
# Record that this variable didn't match so assertions will fail.
self.unused_attributes.setdefault(trackable, []).append(saveable.name)
else:
        # Ignores values missing from the checkpoint, as with object-based
        # restore. Status assertions can be used to check for exact matches,
        # although exact matches are unlikely for name-based checkpoints.
saveable.restore(
restored_tensors=restored_tensors, restored_shapes=None)
# TODO(allenl): If this ends up in a public API, consider adding LINT.IfChange
# or consolidating the implementation with get_variable.
def _default_getter(name,
shape,
dtype,
initializer=None,
partition_info=None,
**kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name,
shape=shape_object,
dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return variables.VariableV1(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
use_resource=True,
**kwargs)
def add_variable(trackable,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
trainable=True):
"""Add a variable to a Trackable with no scope influence."""
return trackable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
getter=_default_getter,
trainable=trainable)
def object_metadata(save_path):
"""Retrieves information about the objects in a checkpoint.
Example usage:
```python
object_graph = tf.contrib.checkpoint.object_metadata(
tf.train.latest_checkpoint(checkpoint_directory))
ckpt_variable_names = set()
for node in object_graph.nodes:
for attribute in node.attributes:
ckpt_variable_names.add(attribute.full_name)
```
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`.
Returns:
A parsed `tf.contrib.checkpoint.TrackableObjectGraph` protocol buffer.
Raises:
ValueError: If an object graph was not found in the checkpoint.
"""
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
raise ValueError(
('The specified checkpoint "%s" does not appear to be object-based (it '
'is missing the key "%s"). Likely it was created with a name-based '
"saver and does not contain an object dependency graph.") %
(save_path, base.OBJECT_GRAPH_PROTO_KEY))
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
return object_graph_proto
def list_objects(root_trackable):
"""Traverse the object graph and list all accessible objects.
Looks for `Trackable` objects which are dependencies of
`root_trackable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_trackable`
(i.e. if they would be saved with a checkpoint).
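  Example usage (a sketch, assuming a `tf.train.Checkpoint` root):
  ```python
  root = tf.train.Checkpoint(v=tf.Variable(1.))
  objects = list_objects(root)
  # `objects` is a flat list containing `root`, `root.v`, and any other
  # transitively tracked dependencies.
  ```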
Args:
root_trackable: A `Trackable` object whose dependencies should be flattened.
Returns:
A flat list of objects.
"""
return graph_view_lib.ObjectGraphView(root_trackable).list_objects()
def gather_initializers(root_trackable):
"""Traverse the object graph and find initialization ops.
Looks for `Trackable` objects which are dependencies of
`root_trackable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_trackable` (i.e. if they would be
saved with a checkpoint).
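  Example usage (a sketch for graph building; when executing eagerly,
  variables are initialized on creation):
  ```python
  root = tf.train.Checkpoint(v=tf.compat.v1.get_variable("v", shape=[]))
  with tf.compat.v1.Session() as session:
    session.run(gather_initializers(root))
  ```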
Args:
root_trackable: A `Trackable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
trackable_objects = list_objects(root_trackable)
return [
c.initializer
for c in trackable_objects
if hasattr(c, "initializer") and c.initializer is not None
]
@tf_contextlib.contextmanager
def capture_dependencies(template):
"""Capture variables created within this scope as `Template` dependencies.
Requires that `template.variable_scope` is active.
This scope is intended as a compatibility measure, allowing a trackable
object to add dependencies on variables created in a block of code which is
not aware of object-based saving (and instead uses variable names
heavily). This is how `Template` objects add dependencies on variables and
sub-`Template`s. Where possible, use `tf.compat.v1.make_template` directly.
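  A minimal sketch (assuming `template` already has a variable scope):
  ```python
  with variable_scope.variable_scope(template.variable_scope):
    with capture_dependencies(template):
      v = variable_scope.get_variable("v", shape=[])
  # `template` now has a checkpoint dependency on `v` under the
  # scope-stripped name "v".
  ```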
Args:
template: The `Template` object to register dependencies with.
Yields:
None (when used as a context manager).
"""
name_prefix = template.variable_scope.name
def _trackable_custom_creator(next_creator,
name,
initial_value,
trackable_parent=None,
**kwargs):
"""A variable creation hook which adds Trackable dependencies.
Set for example during a `Template`'s first wrapped function
execution. Ensures that (a) `template` depends on any trackable
objects using their own `capture_dependencies` scope inside this scope which
create variables, and (b) that any variables not in a more deeply nested
scope are added as dependencies directly.
The `trackable_parent` argument is passed between custom creators but
ignored when the variable object itself is created. This argument indicates
(if not `None`) that a more deeply nested scope has already added the
variable as a dependency, and that parent scopes should add a dependency on
that object rather than on the variable directly.
Args:
next_creator: See `variable_scope.variable_creator_scope`; the next
creator in the chain.
name: The (full, scope-influenced) name of the variable. The `name_prefix`
itself is stripped for the purposes of object-based dependency tracking,
but scopes opened within this scope are respected.
initial_value: See `variable_scope.variable_creator_scope`. Taken
explicitly so the argument can be re-named and used with
`Trackable._add_variable_with_custom_getter`.
trackable_parent: If not None, a more deeply nested trackable object and
its name prefix which were passed to `capture_dependencies` to add a
dependency on (rather than depending on the variable directly).
**kwargs: Passed through to the next creator.
Returns:
The output of `next_creator`: the fetched/created variable object.
"""
def _call_next_creator_renaming_initializer(initializer, **inner_kwargs):
      # Drop the scope-stripped name supplied by the creator chain; the
      # original full `name` is propagated instead.
      inner_kwargs.pop("name")
      return next_creator(initial_value=initializer, name=name, **inner_kwargs)
if name is not None and name.startswith(name_prefix):
scope_stripped_name = name[len(name_prefix) + 1:]
if not trackable_parent:
return template._add_variable_with_custom_getter( # pylint: disable=protected-access
initializer=initial_value,
name=scope_stripped_name,
getter=_call_next_creator_renaming_initializer,
# Disable error checking for Trackable. Exceptions are instead
# raised if necessary when the object-based saver tries to
# save/restore the object.
overwrite=True,
trackable_parent=(template, name_prefix),
**kwargs)
else:
parent_object, parent_name_prefix = trackable_parent
template._track_trackable( # pylint: disable=protected-access
parent_object,
name=parent_name_prefix[len(name_prefix) + 1:],
overwrite=True)
return next_creator(
name=name,
initial_value=initial_value,
trackable_parent=(template, name_prefix),
**kwargs)
with variable_scope.variable_creator_scope(_trackable_custom_creator):
yield
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def assert_existing_objects_matched(self):
"""Raises an exception unless existing Python objects have been matched."""
pass
@abc.abstractmethod
def assert_nontrivial_match(self):
"""Raises an exception if only the root object matched."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
def expect_partial(self):
"""Silence warnings about incomplete checkpoint restores."""
return self
def streaming_restore(status, session=None):
"""When graph building, runs restore ops as soon as they come in.
Args:
    status: A `_LoadStatus` object from an object-based saver's `restore()`.
      Streaming restore from name-based checkpoints is not currently supported.
session: A session to run new restore ops in.
"""
if context.executing_eagerly():
# Streaming restore is the default/only behavior when executing eagerly.
return
if session is None:
session = get_session()
if isinstance(status, NameBasedSaverStatus):
raise NotImplementedError(
"Streaming restore not supported from name-based checkpoints when "
"graph building. File a feature request if this limitation bothers "
"you. As a workaround, consider either using tf.train.Checkpoint to "
"load name-based checkpoints or enabling eager execution.")
status.run_restore_ops(session=session)
# pylint: disable=protected-access
status._checkpoint.new_restore_ops_callback = (
lambda ops: session.run(ops, feed_dict=status._feed_dict))
# pylint: enable=protected-access
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict, graph_view):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._graph_view = graph_view
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
pretty_printer = _ObjectGraphProtoPrettyPrinter(
self._checkpoint.object_graph_proto)
self.assert_existing_objects_matched()
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
if trackable is None:
raise AssertionError("Unresolved object in checkpoint {}: {}"
.format(pretty_printer.node_names[node_id], node))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" %
(self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
unused_attribute_messages = []
for node_id, attribute in six.iteritems(
self._checkpoint.unused_attributes):
obj = self._checkpoint.object_by_proto_id[node_id]
unused_attribute_messages.append(
"{} ({}): {}"
.format(pretty_printer.node_names[node_id], obj, attribute))
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but were not restored):\n{}")
.format("\n".join(unused_attribute_messages)))
return self
def assert_existing_objects_matched(self):
"""Asserts that trackable Python objects have been matched.
Note that this is a weaker assertion than `assert_consumed`. It will only
fail for existing Python objects which are (transitive) dependencies of the
root object and which do not have an entry in the checkpoint.
It will not fail, for example, if a `tf.keras.Layer` object has not yet been
built and so has not created any `tf.Variable` objects.
Returns:
`self` for chaining.
Raises:
AssertionError: If a Python object exists in the transitive dependencies
of the root object but does not have a value in the checkpoint.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
trackable = self._checkpoint.object_by_proto_id.get(node_id, None)
if (trackable is not None and
trackable._update_uid < self._checkpoint.restore_uid): # pylint: disable=protected-access
raise AssertionError("Object not assigned a value from checkpoint: %s" %
(node,))
for trackable_object in self._graph_view.list_objects():
# Remove data structures that do not contain any variables from
# restoration checks.
if (isinstance(trackable_object,
data_structures.TrackableDataStructure) and
not trackable_object._checkpoint_dependencies):
continue
self._checkpoint.all_python_objects.add(trackable_object)
unused_python_objects = (
object_identity.ObjectIdentitySet(self._checkpoint.all_python_objects) -
object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
"due to changes in the Python program: %s") %
(list(unused_python_objects),))
return self
def assert_nontrivial_match(self):
"""Raises an exception if only the root object matched."""
for trackable_object in self._graph_view.list_objects():
self._checkpoint.all_python_objects.add(trackable_object)
if len(self._checkpoint.object_by_proto_id) <= 1:
unused_python_objects = (
object_identity.ObjectIdentitySet(self._checkpoint.all_python_objects)
- object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Nothing except the root object matched a checkpointed value. "
"Typically this means that the checkpoint does not match the "
"Python program. The following objects have no matching "
"checkpointed value: %s") % (list(unused_python_objects),))
else:
raise AssertionError(
"Nothing to load. No dependencies have been added to %s yet." %
(self._graph_view.root,))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = get_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Run operations to initialize or restore objects in the dependency graph.
Any objects in the dependency graph which have initializers but are not in
the checkpoint will have those initializers run, unless those variables are
being restored by a later call to `tf.train.Checkpoint.restore()`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
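    A sketch of typical usage:
    ```python
    status = saver.restore(tf.train.latest_checkpoint(checkpoint_directory))
    with tf.compat.v1.Session() as session:
      # Restores checkpointed values and runs initializers for everything else.
      status.initialize_or_restore(session)
    ```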
Args:
session: The session to run init/restore ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # Initialization and restoration ops are run eagerly
if session is None:
session = get_session()
all_objects = self._graph_view.list_objects()
already_initialized_objects = object_identity.ObjectIdentitySet(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
def expect_partial(self):
"""Silence warnings about incomplete checkpoint restores."""
self._checkpoint.expect_partial = True
return self
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, graph_view, restore_uid):
self._restore_uid = restore_uid
self._graph_view = graph_view
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def assert_existing_objects_matched(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def assert_nontrivial_match(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Objects which would be saved by `Saver.save` will be initialized, unless
those variables are being restored by a later call to
`tf.train.Checkpoint.restore()`.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = get_session()
trackable_objects = self._graph_view.list_objects()
initializers = [
c.initializer for c in trackable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called when graph building. Prefer re-encoding training checkpoints in "
"the object-based format: run save() on the object-based saver (the same "
"one this message is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
# Ideally this deprecation decorator would be on the class, but that
# interferes with isinstance checks.
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def __init__(self, checkpoint, graph_view):
self._checkpoint = checkpoint
self._graph_view = graph_view
def assert_consumed(self):
"""Raises an exception if any variables/objects are unmatched."""
unused_attributes = dict(self._checkpoint.unused_attributes)
if unused_attributes:
raise AssertionError(
"Some objects had attributes which were not restored: %s" %
(unused_attributes,))
for trackable in self._graph_view.list_objects():
# pylint: disable=protected-access
trackable._maybe_initialize_trackable()
if trackable._update_uid < self._checkpoint.restore_uid:
raise AssertionError("Object not restored: %s" % (trackable,))
# pylint: enable=protected-access
return self
def assert_existing_objects_matched(self):
"""Raises an exception if currently created objects are unmatched."""
# For name-based checkpoints there's no object information in the
# checkpoint, so there's no distinction between
# assert_existing_objects_matched and assert_consumed (and both are less
# useful since we don't touch Python objects or Python state).
return self.assert_consumed()
def assert_nontrivial_match(self):
"""Raises an exception if currently created objects are unmatched."""
# For name-based checkpoints there's no object information in the
# checkpoint, so there's no distinction between
# assert_nontrivial_match and assert_consumed (and both are less
# useful since we don't touch Python objects or Python state).
return self.assert_consumed()
def _gather_saveable_objects(self):
"""Walk the object graph, using global names for SaveableObjects."""
objects = self._graph_view.list_objects()
saveable_objects = []
for trackable in objects:
# pylint: disable=protected-access
trackable._maybe_initialize_trackable()
if trackable._update_uid < self._checkpoint.restore_uid:
trackable._update_uid = self._checkpoint.restore_uid
else:
continue
# pylint: enable=protected-access
saveable_objects.extend(
self._checkpoint.globally_named_object_attributes(trackable))
return saveable_objects
def run_restore_ops(self, session=None):
"""Load the name-based checkpoint using a new `tf.compat.v1.train.Saver`."""
if context.executing_eagerly():
return # Nothing to do, variables are restored on creation.
if session is None:
session = get_session()
with ops.device("/cpu:0"):
saveables = self._gather_saveable_objects()
v1_saver_lib.Saver(saveables).restore(
sess=session, save_path=self._checkpoint.save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
class TrackableSaver(object):
"""Saves and restores a `Trackable` object and its dependencies.
See `Trackable` for details of dependency management. `Saver` wraps
`tf.compat.v1.train.Saver` for saving, including extra information about the
graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Trackable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Trackable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from
`tf.compat.v1.train.Saver`, and
so allow additional program transformations.
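  Example usage (a minimal sketch):
  ```python
  saver = TrackableSaver(graph_view_lib.ObjectGraphView(root))
  save_path = saver.save("/tmp/ckpt/prefix")
  # Later, after re-creating `root` and its dependencies:
  saver.restore(save_path).assert_consumed()
  ```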
"""
def __init__(self, graph_view):
"""Configure saving.
Args:
graph_view: A `GraphView` object containing a description of the object
graph to save.
"""
# The file prefix placeholder is created lazily when graph building (and not
# at all when executing eagerly) to avoid creating ops in the constructor
# (when they may never be necessary).
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._file_prefix_feed_tensor = None
self._cached_save_operation = None
# Op caching for restore, shared between _CheckpointRestoreCoordinators
self._restore_op_cache = {}
self._graph_view = graph_view
def _gather_saveables(self, object_graph_tensor=None):
"""Wraps _serialize_object_graph to include the object graph proto."""
(named_saveable_objects, graph_proto,
feed_additions) = self._graph_view.serialize_object_graph()
if object_graph_tensor is None:
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
else:
feed_additions.update(
{object_graph_tensor: graph_proto.SerializeToString()})
assert base.OBJECT_GRAPH_PROTO_KEY not in named_saveable_objects
named_saveable_objects.append(
base.NoRestoreSaveable(
tensor=object_graph_tensor, name=base.OBJECT_GRAPH_PROTO_KEY))
return named_saveable_objects, graph_proto, feed_additions
def _save_cached_when_graph_building(self,
file_prefix,
object_graph_tensor=None):
"""Create or retrieve save ops.
Args:
file_prefix: The prefix for saved checkpoint files.
object_graph_tensor: A `Tensor` to which the current object graph will be
fed.
Returns:
A two-element tuple with a filename tensor and a feed_dict of tensors to
feed when running it (if graph building). The feed dict contains the
current object graph and any Python state to be saved in the
checkpoint. When executing eagerly only the first argument is meaningful.
"""
(named_saveable_objects, graph_proto,
feed_additions) = self._gather_saveables(
object_graph_tensor=object_graph_tensor)
if (self._last_save_object_graph != graph_proto
# When executing eagerly, we need to re-create SaveableObjects each time
# save() is called so they pick up new Tensors passed to their
# constructors. That means the Saver needs to be copied with a new
# var_list.
or context.executing_eagerly() or ops.inside_function()):
saver = functional_saver.MultiDeviceSaver(named_saveable_objects)
save_op = saver.save(file_prefix)
with ops.device("/cpu:0"):
with ops.control_dependencies([save_op]):
self._cached_save_operation = array_ops.identity(file_prefix)
self._last_save_object_graph = graph_proto
return self._cached_save_operation, feed_additions
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Trackable objects it depends on at the time `Saver.save()` is called.
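    For example (a sketch; `step` is an integer global step):
    ```python
    path = saver.save("/tmp/ckpt/prefix", checkpoint_number=step)
    # `path` is the prefix plus the checkpoint number, e.g.
    # "/tmp/ckpt/prefix-10"; pass it to `restore`.
    ```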
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_trackable` or one of its dependencies (via
`Trackable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
feed_dict = {}
use_session = (not context.executing_eagerly() and
not ops.inside_function())
if checkpoint_number:
file_prefix = "%s-%d" % (file_prefix, checkpoint_number)
if use_session:
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
self._file_prefix_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
file_prefix_tensor = self._file_prefix_feed_tensor
feed_dict[file_prefix_tensor] = file_prefix
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(
file_prefix, dtype=dtypes.string)
object_graph_tensor = None
file_io.recursive_create_dir(os.path.dirname(file_prefix))
save_path, new_feed_additions = self._save_cached_when_graph_building(
file_prefix=file_prefix_tensor, object_graph_tensor=object_graph_tensor)
if new_feed_additions:
feed_dict.update(new_feed_additions)
if not use_session:
session = None
elif session is None:
session = get_session()
if session:
return session.run(save_path, feed_dict=feed_dict)
else:
return save_path
def restore(self, save_path):
"""Restore a training checkpoint.
Restores `root_trackable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_trackable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency graph.
If the checkpoint was written by the name-based
`tf.compat.v1.train.Saver`, names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._graph_view, ops.uid())
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
graph_building = not context.executing_eagerly()
if graph_building:
dtype_map = None
else:
dtype_map = reader.get_variable_to_dtype_map()
try:
object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try the
# name-based compatibility mode.
restore_coordinator = _NameBasedRestoreCoordinator(
save_path=save_path, dtype_map=dtype_map)
if not graph_building:
for existing_trackable in self._graph_view.list_objects():
# pylint: disable=protected-access
existing_trackable._maybe_initialize_trackable()
existing_trackable._name_based_restores.add(restore_coordinator)
existing_trackable._name_based_attribute_restore(restore_coordinator)
# pylint: enable=protected-access
return NameBasedSaverStatus(
restore_coordinator, graph_view=self._graph_view)
if graph_building:
if self._file_prefix_placeholder is None:
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
checkpoint = _CheckpointRestoreCoordinator(
object_graph_proto=object_graph_proto,
save_path=save_path,
save_path_tensor=file_prefix_tensor,
restore_op_cache=self._restore_op_cache,
graph_view=self._graph_view)
base.CheckpointPosition(
checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root)
load_status = CheckpointLoadStatus(
checkpoint,
graph_view=self._graph_view,
feed_dict=file_prefix_feed_dict)
return load_status
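# A minimal sketch (not part of the original module) of the deferred-restore
# flow documented above, written against the public `tf.train.Checkpoint`
# API. The attribute name `v` and the surrounding setup are illustrative;
# assumes eager execution and a checkpoint that contains exactly one such
# variable.
def _example_deferred_restore(save_path):
  import tensorflow as tf
  root = tf.train.Checkpoint()
  status = root.restore(save_path)  # Nothing to match yet; requests queue.
  root.v = tf.Variable(0.)          # Matched and assigned on creation.
  status.assert_consumed()          # Raises if `save_path` held anything else.
  return root.v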
def frozen_saver(root_trackable):
"""Creates a static `tf.compat.v1.train.Saver` from a trackable object.
The returned `Saver` saves object-based checkpoints, but these checkpoints
will no longer reflect structural changes to the object graph, only changes to
the values of `Variable`s added as dependencies of the root object before
  `frozen_saver` was called.
  `restore` works on the returned `Saver`, but requires that the object graph
  of the checkpoint being loaded exactly matches the object graph when
  `frozen_saver` was called. This is in contrast to the object-based restore
  performed by
`tf.train.Checkpoint` which attempts a fuzzy matching between a checkpoint's
object graph and the current Python object graph.
Args:
root_trackable: A trackable object to save.
Returns:
A saver which saves object-based checkpoints for the object graph frozen at
the time `frozen_saver` was called.
"""
named_saveable_objects = graph_view_lib.ObjectGraphView(
root_trackable).frozen_saveable_objects()
return functional_saver.MultiDeviceSaver(named_saveable_objects)
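# A sketch (assumptions flagged) of the freezing behavior described above:
# the returned saver pins the object graph at call time, so later additions
# to `root` are not saved. Assumes `MultiDeviceSaver.save(file_prefix)` from
# `functional_saver`; `root` and the `extra` attribute are illustrative.
def _example_frozen_saver(root, file_prefix):
  import tensorflow as tf
  frozen = frozen_saver(root)   # The object graph is captured here.
  root.extra = tf.Variable(1.)  # Added afterwards: absent from checkpoints.
  return frozen.save(file_prefix)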
def saver_with_op_caching(obj):
"""A TrackableSaver with a SaveableObject cache when graph building."""
if context.executing_eagerly():
saveables_cache = None
else:
saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
return TrackableSaver(
graph_view_lib.ObjectGraphView(
weakref.ref(obj), saveables_cache=saveables_cache))
# Mentions graph building / Sessions. The v2 version is below.
@tf_export(v1=["train.Checkpoint"])
class CheckpointV1(tracking.AutoTrackable):
"""Groups trackable objects, saving and restoring them.
`Checkpoint`'s constructor accepts keyword arguments whose values are types
that contain trackable state, such as `tf.compat.v1.train.Optimizer`
implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
`tf.keras.Model` implementations. It saves these values with a checkpoint, and
maintains a `save_counter` for numbering checkpoints.
Example usage when graph building:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
train_op = optimizer.minimize( ... )
status.assert_consumed() # Optional sanity checks.
with tf.compat.v1.Session() as session:
# Use the Session to restore variables, or initialize them if
# tf.train.latest_checkpoint returned None.
status.initialize_or_restore(session)
for _ in range(num_training_steps):
session.run(train_op)
checkpoint.save(file_prefix=checkpoint_prefix)
```
Example usage with eager execution enabled:
```python
import tensorflow as tf
import os
tf.compat.v1.enable_eager_execution()
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save` and `Checkpoint.restore` write and read object-based
checkpoints, in contrast to `tf.compat.v1.train.Saver` which writes and reads
`variable.name` based checkpoints. Object-based checkpointing saves a graph of
dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,
etc.) with named edges, and this graph is used to match variables when
restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables when executing
eagerly. Prefer `tf.train.Checkpoint` over `tf.compat.v1.train.Saver` for new
code.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
When variables are assigned to multiple workers, each worker writes its own
section of the checkpoint. These sections are then merged/re-indexed to behave
as a single checkpoint. This avoids copying all variables to one worker, but
does require that all workers see a common filesystem.
While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the
same format, note that the root of the resulting checkpoint is the object the
save method is attached to. This means saving a `tf.keras.Model` using
`save_weights` and loading into a `tf.train.Checkpoint` with a `Model`
attached (or vice versa) will not match the `Model`'s variables. See the
[guide to training
checkpoints](https://www.tensorflow.org/alpha/guide/checkpoints) for
details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for
training checkpoints.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Values must be trackable objects.
Raises:
ValueError: If objects in `kwargs` are not trackable.
"""
super(CheckpointV1, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, (base.Trackable, def_function.Function)):
raise ValueError(
("`Checkpoint` was expecting a trackable object (an object "
"derived from `TrackableBase`), got %s. If you believe this "
"object should be trackable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
% (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._save_assign_op = None
self._saver = saver_with_op_caching(self)
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
self._save_counter = data_structures.NoDependency(
add_variable(
self,
name="save_counter",
initializer=0,
dtype=dtypes.int64,
trainable=False))
def write(self, file_prefix, session=None):
"""Writes a training checkpoint.
The checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.write()` is
called.
`write` does not number checkpoints, increment `save_counter`, or update the
metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
use by higher level checkpoint management utilities. `save` provides a very
basic implementation of these features.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
output = self._saver.save(file_prefix=file_prefix, session=session)
if tensor_util.is_tensor(output):
if context.executing_eagerly():
return compat.as_str(output.numpy())
else:
# Function building
return output
else:
      # Graph + Session: the save was already run in the session.
return compat.as_str(output)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Saves a training checkpoint and provides basic checkpoint management.
The saved checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.save()` is
called.
`save` is a basic convenience wrapper around the `write` method,
sequentially numbering checkpoints using `save_counter` and updating the
metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
management, for example garbage collection and custom numbering, may be
provided by other utilities which also wrap `write`
(`tf.contrib.checkpoint.CheckpointManager` for example).
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
graph_building = not context.executing_eagerly()
if graph_building:
if ops.inside_function():
raise NotImplementedError(
"Calling tf.train.Checkpoint.save() from a function is not "
"supported, as save() modifies saving metadata in ways not "
"supported by TensorFlow Operations. Consider using "
"tf.train.Checkpoint.write(), a lower-level API which does not "
"update metadata. tf.train.latest_checkpoint and related APIs will "
"not see this checkpoint.")
if session is None:
session = get_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
if not graph_building or self._save_assign_op is None:
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1, read_value=True)
if graph_building:
self._save_assign_op = data_structures.NoDependency(assign_op)
if graph_building:
checkpoint_number = session.run(self._save_assign_op)
else:
checkpoint_number = assign_op.numpy()
file_path = self.write(
"%s-%d" % (file_prefix, checkpoint_number), session=session)
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(file_prefix),
model_checkpoint_path=file_path,
all_model_checkpoint_paths=[file_path],
save_relative_paths=True)
return file_path
def restore(self, save_path):
"""Restore a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
When executing eagerly, either assigns values immediately if variables to
restore have been created already, or defers restoration until the variables
are created. Dependencies added after this call will be matched if they have
a corresponding object in the checkpoint (the restore request will queue in
any trackable object waiting for the expected dependency to be added).
When graph building, restoration ops are added to the graph but not run
immediately.
To ensure that loading is complete and no more assignments will take place,
use the `assert_consumed()` method of the status object returned by
`restore`:
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path).assert_consumed()
```
An exception will be raised if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
When graph building, `assert_consumed()` indicates that all of the restore
ops that will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` method of the status object:
```python
checkpoint.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this
method. Names are used to match variables. No restore ops are created/run
until `run_restore_ops()` or `initialize_or_restore()` are called on the
returned status object when graph building, but there is restore-on-creation
when executing eagerly. Re-encode name-based checkpoints using
`tf.train.Checkpoint.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency graph.
If the checkpoint was written by the name-based
`tf.compat.v1.train.Saver`, names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration and run initialization/restore ops.
The returned status object has the following methods:
* `assert_consumed()`:
Raises an exception if any variables/objects are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with `initialize_or_restore` or `run_restore_ops`.
* `assert_existing_objects_matched()`:
Raises an exception if any existing Python objects in the dependency
graph are unmatched. Unlike `assert_consumed`, this assertion will
pass if values in the checkpoint have no corresponding Python
objects. For example a `tf.keras.Layer` object which has not yet been
built, and so has not created any variables, will pass this assertion
but fail `assert_consumed`. Useful when loading part of a larger
checkpoint into a new Python program, e.g. a training checkpoint with
        a `tf.compat.v1.train.Optimizer` was saved but only the state required
        for inference is being loaded. This method returns the status object,
        and
so may be chained with `initialize_or_restore` or `run_restore_ops`.
* `assert_nontrivial_match()`: Asserts that something aside from the root
object was matched. This is a very weak assertion, but is useful for
sanity checking in library code where objects may exist in the
checkpoint which haven't been created in Python and some Python
objects may not have a checkpointed value.
* `expect_partial()`: Silence warnings about incomplete checkpoint
restores. Warnings are otherwise printed for unused parts of the
checkpoint file or object when the `Checkpoint` object is deleted
(often at program shutdown).
* `initialize_or_restore(session=None)`:
When graph building, runs variable initializers if `save_path` is
`None`, but otherwise runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect when
executing eagerly (variables are initialized or restored eagerly).
* `run_restore_ops(session=None)`:
When graph building, runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect when
executing eagerly (restore operations are run eagerly). May only be
called when `save_path` is not `None`.
"""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
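# A sketch of partial loading with the status assertions documented above: a
# checkpoint that also contains optimizer state is loaded into a model-only
# `Checkpoint`. Assumes eager execution; the layer and dependency names are
# illustrative.
def _example_partial_restore(save_path):
  import tensorflow as tf
  dense = tf.keras.layers.Dense(1)
  dense(tf.constant([[1.]]))  # Build variables so they can be matched now.
  status = tf.train.Checkpoint(dense=dense).restore(save_path)
  status.assert_existing_objects_matched()  # Passes: local objects matched.
  # status.assert_consumed() would raise: optimizer slots go unused.
  status.expect_partial()  # Silence unused-value warnings at shutdown.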
@tf_export("train.Checkpoint", v1=[])
class Checkpoint(tracking.AutoTrackable):
"""Groups trackable objects, saving and restoring them.
`Checkpoint`'s constructor accepts keyword arguments whose values are types
that contain trackable state, such as `tf.keras.optimizers.Optimizer`
implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
`tf.keras.Model` implementations. It saves these values with a checkpoint, and
maintains a `save_counter` for numbering checkpoints.
Example usage:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save` and `Checkpoint.restore` write and read object-based
  checkpoints, in contrast to TensorFlow 1.x's `tf.compat.v1.train.Saver`,
  which writes and reads `variable.name` based checkpoints. Object-based
  checkpointing saves a
graph of dependencies between Python objects (`Layer`s, `Optimizer`s,
`Variable`s, etc.) with named edges, and this graph is used to match variables
when restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
When variables are assigned to multiple workers, each worker writes its own
section of the checkpoint. These sections are then merged/re-indexed to behave
as a single checkpoint. This avoids copying all variables to one worker, but
does require that all workers see a common filesystem.
While `tf.keras.Model.save_weights` and `tf.train.Checkpoint.save` save in the
same format, note that the root of the resulting checkpoint is the object the
save method is attached to. This means saving a `tf.keras.Model` using
`save_weights` and loading into a `tf.train.Checkpoint` with a `Model`
attached (or vice versa) will not match the `Model`'s variables. See the
[guide to training
checkpoints](https://www.tensorflow.org/alpha/guide/checkpoints) for
details. Prefer `tf.train.Checkpoint` over `tf.keras.Model.save_weights` for
training checkpoints.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Values must be trackable objects.
Raises:
ValueError: If objects in `kwargs` are not trackable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, (base.Trackable, def_function.Function)):
raise ValueError(
("`Checkpoint` was expecting a trackable object (an object "
"derived from `TrackableBase`), got %s. If you believe this "
"object should be trackable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
% (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._save_assign_op = None
self._saver = saver_with_op_caching(self)
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
# add_variable creates a dependency named "save_counter"; NoDependency
# prevents creating a second dependency named "_save_counter".
self._save_counter = data_structures.NoDependency(
add_variable(
self,
name="save_counter",
initializer=0,
dtype=dtypes.int64,
trainable=False))
def write(self, file_prefix):
"""Writes a training checkpoint.
The checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.write()` is
called.
`write` does not number checkpoints, increment `save_counter`, or update the
metadata used by `tf.train.latest_checkpoint`. It is primarily intended for
use by higher level checkpoint management utilities. `save` provides a very
basic implementation of these features.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix).
Returns:
The full path to the checkpoint (i.e. `file_prefix`).
"""
output = self._saver.save(file_prefix=file_prefix)
if tensor_util.is_tensor(output):
if context.executing_eagerly():
return compat.as_str(output.numpy())
else:
# Function building
return output
else:
      # Graph + Session: the save was already run in the session.
return compat.as_str(output)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix):
"""Saves a training checkpoint and provides basic checkpoint management.
The saved checkpoint includes variables created by this object and any
trackable objects it depends on at the time `Checkpoint.save()` is
called.
`save` is a basic convenience wrapper around the `write` method,
sequentially numbering checkpoints using `save_counter` and updating the
metadata used by `tf.train.latest_checkpoint`. More advanced checkpoint
management, for example garbage collection and custom numbering, may be
provided by other utilities which also wrap `write`
(`tf.contrib.checkpoint.CheckpointManager` for example).
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
Returns:
The full path to the checkpoint.
"""
graph_building = not context.executing_eagerly()
if graph_building:
if ops.inside_function():
raise NotImplementedError(
"Calling tf.train.Checkpoint.save() from a function is not "
"supported, as save() modifies saving metadata in ways not "
"supported by TensorFlow Operations. Consider using "
"tf.train.Checkpoint.write(), a lower-level API which does not "
"update metadata. tf.train.latest_checkpoint and related APIs will "
"not see this checkpoint.")
session = get_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
if not graph_building or self._save_assign_op is None:
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1, read_value=True)
if graph_building:
self._save_assign_op = data_structures.NoDependency(assign_op)
if graph_building:
checkpoint_number = session.run(self._save_assign_op)
else:
checkpoint_number = assign_op.numpy()
file_path = self.write("%s-%d" % (file_prefix, checkpoint_number))
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(file_prefix),
model_checkpoint_path=file_path,
all_model_checkpoint_paths=[file_path],
save_relative_paths=True)
return file_path
def restore(self, save_path):
"""Restore a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
Either assigns values immediately if variables to restore have been created
already, or defers restoration until the variables are created. Dependencies
added after this call will be matched if they have a corresponding object in
the checkpoint (the restore request will queue in any trackable object
waiting for the expected dependency to be added).
To ensure that loading is complete and no more assignments will take place,
use the `assert_consumed()` method of the status object returned by
`restore`:
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path).assert_consumed()
```
An exception will be raised if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
    Name-based `tf.compat.v1.train.Saver` checkpoints from TensorFlow 1.x can
    be loaded using this method. Names are used to match variables. Re-encode
    name-based
checkpoints using `tf.train.Checkpoint.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency graph.
If the checkpoint was written by the name-based
`tf.compat.v1.train.Saver`, names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration.
The returned status object has the following methods:
* `assert_consumed()`:
Raises an exception if any variables/objects are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with other assertions.
* `assert_existing_objects_matched()`:
Raises an exception if any existing Python objects in the dependency
graph are unmatched. Unlike `assert_consumed`, this assertion will
pass if values in the checkpoint have no corresponding Python
objects. For example a `tf.keras.Layer` object which has not yet been
built, and so has not created any variables, will pass this assertion
but fail `assert_consumed`. Useful when loading part of a larger
checkpoint into a new Python program, e.g. a training checkpoint with
        a `tf.compat.v1.train.Optimizer` was saved but only the state required
        for inference is being loaded. This method returns the status object,
        and
so may be chained with other assertions.
* `assert_nontrivial_match()`: Asserts that something aside from the root
object was matched. This is a very weak assertion, but is useful for
sanity checking in library code where objects may exist in the
checkpoint which haven't been created in Python and some Python
objects may not have a checkpointed value.
* `expect_partial()`: Silence warnings about incomplete checkpoint
restores. Warnings are otherwise printed for unused parts of the
checkpoint file or object when the `Checkpoint` object is deleted
(often at program shutdown).
"""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
return status
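# An end-to-end sketch of the object-based save/restore cycle implemented
# above. The directory and the `step` attribute are illustrative; assumes
# eager execution.
def _example_roundtrip(directory="/tmp/tf_ckpt_example"):
  import os
  import tensorflow as tf
  ckpt = tf.train.Checkpoint(step=tf.Variable(0))
  path = ckpt.save(os.path.join(directory, "ckpt"))  # e.g. ".../ckpt-1"
  ckpt.step.assign(7)                   # Diverge from the saved value.
  ckpt.restore(path).assert_consumed()  # Assigns eagerly.
  assert int(ckpt.step) == 0            # The checkpointed value wins.
  return path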
|
tensorflow-master
|
tensorflow/python/training/tracking/util.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.module import module
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import python_state
from tensorflow.python.training.tracking import util
class _NumpyState(module.Module):
"""A checkpointable object whose NumPy array attributes are saved/restored.
Example usage:
```python
arrays = _NumpyState()
checkpoint = tf.train.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save("/tmp/ckpt")
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
assert (arrays.x == numpy.zeros([3, 4])).all()
second_checkpoint = tf.train.Checkpoint(
numpy_arrays=_NumpyState())
  # Attributes of `_NumpyState` objects are created automatically by restore()
second_checkpoint.restore(save_path)
assert (second_checkpoint.numpy_arrays.x == numpy.zeros([3, 4])).all()
```
  Note that `_NumpyState` objects re-create the attributes of the previously
saved object on `restore()`. This is in contrast to TensorFlow variables, for
which a `Variable` object must be created and assigned to an attribute.
This snippet works both when graph building and when executing eagerly. On
save, the NumPy array(s) are fed as strings to be saved in the checkpoint (via
a placeholder when graph building, or as a string constant when executing
eagerly). When restoring they skip the TensorFlow graph entirely, and so no
restore ops need be run. This means that restoration always happens eagerly,
rather than waiting for `checkpoint.restore(...).run_restore_ops()` like
TensorFlow variables when graph building.
"""
def __init__(self):
super(_NumpyState, self).__setattr__("_arrays", module.Module())
def __getattribute__(self, name):
"""Un-wrap `_NumpyWrapper` objects when accessing attributes."""
try:
arrays = super(_NumpyState, self).__getattribute__("_arrays")
except AttributeError:
# _arrays hasn't been assigned yet
return super(_NumpyState, self).__getattribute__(name)
try:
value = getattr(arrays, name)
except AttributeError:
dummy_array = numpy.array([])
setattr(arrays, name, _NumpyWrapper(dummy_array))
value = getattr(arrays, name)
if value.array is dummy_array:
# No set or restored attribute with this name
delattr(arrays, name)
return super(_NumpyState, self).__getattribute__(name)
if isinstance(value, _NumpyWrapper):
return value.array
return super(_NumpyState, self).__getattribute__(name)
def __setattr__(self, name, value):
"""Automatically wrap NumPy arrays assigned to attributes."""
if isinstance(value, (numpy.ndarray, numpy.generic)):
try:
existing = getattr(self._arrays, name)
existing.array = value
return
except AttributeError:
value = _NumpyWrapper(value)
setattr(self._arrays, name, value)
return
super(_NumpyState, self).__setattr__(name, value)
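# A tiny illustration (not in the original file) of the wrapping behavior
# implemented above: assigning a NumPy array stores a `_NumpyWrapper`
# internally, while attribute access returns the bare array again.
def _example_numpy_state():
  state = _NumpyState()
  state.x = numpy.ones([2])
  assert isinstance(state._arrays.x, _NumpyWrapper)  # Wrapped for tracking.
  assert (state.x == numpy.ones([2])).all()          # Unwrapped on access.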
class _NumpyWrapper(python_state.PythonState):
"""Wraps a NumPy array for storage in an object-based checkpoint."""
def __init__(self, array):
"""Specify a NumPy array to wrap.
Args:
array: The NumPy array to save and restore (may be overwritten).
"""
self.array = array
def serialize(self):
"""Callback to serialize the array."""
string_file = io.BytesIO()
try:
numpy.save(string_file, self.array, allow_pickle=False)
serialized = string_file.getvalue()
finally:
string_file.close()
return serialized
def deserialize(self, string_value):
"""Callback to deserialize the array."""
string_file = io.BytesIO(string_value)
try:
self.array = numpy.load(string_file, allow_pickle=False)
finally:
string_file.close()
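# A sketch of another `PythonState` subclass in the same spirit as
# `_NumpyWrapper` above: any value that round-trips through bytes can ride
# along in an object-based checkpoint. JSON is purely illustrative here.
import json


class _JsonState(python_state.PythonState):
  """Wraps a JSON-serializable Python value for checkpointing (sketch)."""

  def __init__(self, value):
    self.value = value

  def serialize(self):
    """Callback to serialize the value to bytes."""
    return json.dumps(self.value).encode("utf-8")

  def deserialize(self, string_value):
    """Callback to deserialize the value from bytes."""
    self.value = json.loads(string_value)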
class NumpyStateTests(test.TestCase):
def testWrapper(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
root = util.Checkpoint(numpy=_NumpyWrapper(numpy.array([1.])))
save_path = root.save(prefix)
root.numpy.array *= 2.
self.assertEqual([2.], root.numpy.array)
root.restore(save_path)
self.assertEqual([1.], root.numpy.array)
@test_util.run_in_graph_and_eager_modes
def testSaveRestoreNumpyState(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = _NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_state.b = numpy.ones([2, 2])
save_state.b = numpy.zeros([2, 2])
save_state.c = numpy.int64(3)
self.assertAllEqual(numpy.ones([2, 2]), save_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), save_state.b)
self.assertEqual(3, save_state.c)
first_save_path = saver.save(prefix)
save_state.a[1, 1] = 2.
save_state.c = numpy.int64(4)
second_save_path = saver.save(prefix)
load_state = _NumpyState()
loader = util.Checkpoint(numpy=load_state)
loader.restore(first_save_path).initialize_or_restore()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(3, load_state.c)
load_state.a[0, 0] = 42.
self.assertAllEqual([[42., 1.], [1., 1.]], load_state.a)
loader.restore(first_save_path).run_restore_ops()
self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
loader.restore(second_save_path).run_restore_ops()
self.assertAllEqual([[1., 1.], [1., 2.]], load_state.a)
self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
self.assertEqual(4, load_state.c)
def testNoGraphPollution(self):
graph = ops.Graph()
with graph.as_default(), session.Session():
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
save_state = _NumpyState()
saver = util.Checkpoint(numpy=save_state)
save_state.a = numpy.ones([2, 2])
save_path = saver.save(prefix)
saver.restore(save_path)
graph.finalize()
saver.save(prefix)
save_state.a = numpy.zeros([2, 2])
saver.save(prefix)
saver.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def testDocstringExample(self):
arrays = _NumpyState()
checkpoint = util.Checkpoint(numpy_arrays=arrays)
arrays.x = numpy.zeros([3, 4])
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
arrays.x[1, 1] = 4.
checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), arrays.x)
second_checkpoint = util.Checkpoint(numpy_arrays=_NumpyState())
second_checkpoint.restore(save_path)
self.assertAllEqual(numpy.zeros([3, 4]), second_checkpoint.numpy_arrays.x)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/python_state_test.py
|
"""Manages a graph of Trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
from tensorflow.core.protobuf import trackable_object_graph_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.training import optimizer as optimizer_v1
from tensorflow.python.training.saving import saveable_object as saveable_object_lib
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import tracking
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(trackable.name)
for trackable in path_to_root))
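# A quick, runnable illustration (not in the original file) of the escaping
# rules above: the escape character is doubled and "/" becomes ".S", so "/"
# can unambiguously separate the edges on a path from the root.
def _example_escaping():
  assert _escape_local_name("a/b") == "a.Sb"
  assert _escape_local_name("a.b") == "a..b"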
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
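# Illustration only: the checkpoint name of an Adam-style "m" slot for a
# variable reachable at "model/dense/kernel", with the optimizer reachable at
# "optimizer". Both paths are hypothetical.
def _example_slot_name():
  name_fn = _slot_variable_naming_for_optimizer("optimizer")
  assert name_fn(variable_path="model/dense/kernel", slot_name="m") == (
      "model/dense/kernel/.OPTIMIZER_SLOT/optimizer/m")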
def _serialize_slot_variables(trackable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(trackable_objects)
slot_variables = object_identity.ObjectIdentityDictionary()
for trackable in non_slot_objects:
if (isinstance(trackable, optimizer_v1.Optimizer)
# TODO(b/110718070): Fix Keras imports.
or hasattr(trackable, "_create_or_restore_slot_variable")):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[trackable])
slot_names = trackable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = trackable.get_slot(
original_variable, slot_name)
except (AttributeError, KeyError):
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_trackable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO(allenl): Gather dependencies of slot variables.
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Trackable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(trackable_objects)
node_ids[slot_variable] = slot_variable_node_id
trackable_objects.append(slot_variable)
slot_variable_proto = (
trackable_object_graph_pb2.TrackableObjectGraph
.TrackableObject.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(trackable, []).append(
slot_variable_proto)
return slot_variables
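# A self-contained sketch of the shortest-path naming used by
# `ObjectGraphView._breadth_first_traversal` below: because the traversal is
# breadth-first, the first path recorded to each object is a shortest one,
# which keeps checkpoint keys short. The toy `children` mapping stands in for
# `list_dependencies`; all names are illustrative.
def _example_bfs(children, root):
  paths = {root: ()}
  to_visit = collections.deque([root])
  while to_visit:
    node = to_visit.popleft()
    for name, dep in children.get(node, ()):
      if dep not in paths:
        paths[dep] = paths[node] + (name,)
        to_visit.append(dep)
  return paths  # e.g. {"root": (), "dense": ("dense",), ...}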
class ObjectGraphView(object):
"""Gathers and serializes an object graph."""
def __init__(self, root, saveables_cache=None):
"""Configure the graph view.
Args:
root: A `Trackable` object whose variables (including the variables
of dependencies, recursively) should be saved. May be a weak reference.
saveables_cache: A dictionary mapping `Trackable` objects ->
attribute names -> SaveableObjects, used to avoid re-creating
SaveableObjects when graph building.
"""
self._root_ref = root
self._saveables_cache = saveables_cache
def list_dependencies(self, obj):
# pylint: disable=protected-access
obj._maybe_initialize_trackable()
return obj._checkpoint_dependencies
# pylint: enable=protected-access
@property
def saveables_cache(self):
"""Maps Trackable objects -> attribute names -> list(SaveableObjects).
Used to avoid re-creating SaveableObjects when graph building. None when
executing eagerly.
Returns:
The cache (an object-identity dictionary), or None if caching is disabled.
"""
return self._saveables_cache
@property
def root(self):
if isinstance(self._root_ref, weakref.ref):
derefed = self._root_ref()
assert derefed is not None
return derefed
else:
return self._root_ref
def _breadth_first_traversal(self):
"""Find shortest paths to all dependencies of self.root."""
bfs_sorted = []
to_visit = collections.deque([self.root])
path_to_root = object_identity.ObjectIdentityDictionary()
path_to_root[self.root] = ()
while to_visit:
current_trackable = to_visit.popleft()
if isinstance(current_trackable, tracking.NotTrackable):
raise NotImplementedError(
("The object %s does not support object-based saving. File a "
"feature request if this limitation bothers you. In the meantime, "
"you can remove the dependency on this object and save everything "
"else.")
% (current_trackable,))
bfs_sorted.append(current_trackable)
for name, dependency in self.list_dependencies(current_trackable):
if dependency not in path_to_root:
path_to_root[dependency] = (
path_to_root[current_trackable] + (
base.TrackableReference(name, dependency),))
to_visit.append(dependency)
return bfs_sorted, path_to_root
def _add_attributes_to_object_graph(
self, trackable_objects, object_graph_proto, node_ids, object_names,
object_map):
"""Create SaveableObjects and corresponding SerializedTensor protos."""
named_saveable_objects = []
if self._saveables_cache is None:
# No SaveableObject caching. Either we're executing eagerly, or building a
# static save which is specialized to the current Python state.
feed_additions = None
else:
# If we are caching SaveableObjects, we need to build up a feed_dict with
# functions computing volatile Python state to be saved with the
# checkpoint.
feed_additions = {}
for checkpoint_id, (trackable, object_proto) in enumerate(
zip(trackable_objects, object_graph_proto.nodes)):
assert node_ids[trackable] == checkpoint_id
object_name = object_names[trackable]
if object_map is None:
object_to_save = trackable
else:
object_to_save = object_map.get(trackable, trackable)
if self._saveables_cache is not None:
cached_attributes = self._saveables_cache.setdefault(object_to_save, {})
else:
cached_attributes = None
for name, saveable_factory in (
object_to_save._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
if cached_attributes is None:
saveables = None
else:
saveables = cached_attributes.get(name, None)
if saveables is not None:
for saveable in saveables:
if attribute.checkpoint_key not in saveable.name:
# The checkpoint key for this SaveableObject is different. We
# need to re-create it.
saveables = None
del cached_attributes[name]
break
if saveables is None:
if callable(saveable_factory):
maybe_saveable = saveable_factory(name=attribute.checkpoint_key)
else:
maybe_saveable = saveable_factory
if isinstance(maybe_saveable, saveable_object_lib.SaveableObject):
saveables = (maybe_saveable,)
else:
# Figure out the name-based Saver's name for this variable. If it's
# already a SaveableObject we'd just get the checkpoint key back, so
# we leave full_name blank.
saver_dict = saveable_object_util.op_list_to_dict(
[maybe_saveable], convert_variable_to_tensor=False)
full_name, = saver_dict.keys()
saveables = tuple(saveable_object_util.saveable_objects_for_op(
op=maybe_saveable, name=attribute.checkpoint_key))
for saveable in saveables:
saveable.full_name = full_name
for saveable in saveables:
if attribute.checkpoint_key not in saveable.name:
raise AssertionError(
("The object %s produced a SaveableObject with name '%s' for "
"attribute '%s'. Expected a name containing '%s'.")
                % (trackable, saveable.name, name,
attribute.checkpoint_key))
if cached_attributes is not None:
cached_attributes[name] = saveables
optional_restore = None
for saveable in saveables:
if optional_restore is None:
optional_restore = saveable.optional_restore
else:
optional_restore = optional_restore and saveable.optional_restore
if hasattr(saveable, "full_name"):
attribute.full_name = saveable.full_name
if isinstance(saveable, base.PythonStateSaveable):
if feed_additions is None:
assert self._saveables_cache is None
# If we're not caching saveables, then we're either executing
# eagerly or building a static save/restore (e.g. for a
# SavedModel). In either case, we should embed the current Python
# state in the graph rather than relying on a feed dict.
saveable = saveable.freeze()
else:
saveable_feed_dict = saveable.feed_dict_additions()
for new_feed_key in saveable_feed_dict.keys():
if new_feed_key in feed_additions:
raise AssertionError(
("The object %s tried to feed a value for the Tensor %s "
"when saving, but another object is already feeding a "
"value.")
% (trackable, new_feed_key))
feed_additions.update(saveable_feed_dict)
named_saveable_objects.append(saveable)
if optional_restore is None:
optional_restore = False
attribute.optional_restore = optional_restore
return named_saveable_objects, feed_additions
def _fill_object_graph_proto(self, trackable_objects,
node_ids,
slot_variables,
object_graph_proto=None):
"""Name non-slot `Trackable`s and add them to `object_graph_proto`."""
if object_graph_proto is None:
object_graph_proto = (
trackable_object_graph_pb2.TrackableObjectGraph())
for checkpoint_id, trackable in enumerate(trackable_objects):
assert node_ids[trackable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(trackable, ()))
for child in self.list_dependencies(trackable):
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return object_graph_proto
def _serialize_gathered_objects(self, trackable_objects, path_to_root,
object_map=None):
"""Create SaveableObjects and protos for gathered objects."""
object_names = object_identity.ObjectIdentityDictionary()
for obj, path in path_to_root.items():
object_names[obj] = _object_prefix_from_path(path)
node_ids = object_identity.ObjectIdentityDictionary()
for node_id, node in enumerate(trackable_objects):
node_ids[node] = node_id
slot_variables = _serialize_slot_variables(
trackable_objects=trackable_objects,
node_ids=node_ids,
object_names=object_names)
object_graph_proto = self._fill_object_graph_proto(
trackable_objects=trackable_objects,
node_ids=node_ids,
slot_variables=slot_variables)
named_saveable_objects, feed_additions = (
self._add_attributes_to_object_graph(
trackable_objects=trackable_objects,
object_graph_proto=object_graph_proto,
node_ids=node_ids,
object_names=object_names,
object_map=object_map))
return named_saveable_objects, object_graph_proto, feed_additions
def serialize_object_graph(self):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Trackable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Returns:
A tuple of (named_variables, object_graph_proto, feed_additions):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A TrackableObjectGraph protocol buffer
containing the serialized object graph and variable references.
feed_additions: A dictionary mapping from Tensors to values which should
be fed when saving.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
trackable_objects, path_to_root = self._breadth_first_traversal()
return self._serialize_gathered_objects(
trackable_objects, path_to_root)
def frozen_saveable_objects(self, object_map=None, to_graph=None):
"""Creates SaveableObjects with the current object graph frozen."""
trackable_objects, path_to_root = self._breadth_first_traversal()
if to_graph:
target_context = to_graph.as_default
else:
target_context = ops.NullContextmanager
with target_context():
named_saveable_objects, graph_proto, _ = self._serialize_gathered_objects(
trackable_objects,
path_to_root,
object_map)
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
named_saveable_objects.append(
base.NoRestoreSaveable(
tensor=object_graph_tensor,
name=base.OBJECT_GRAPH_PROTO_KEY))
return named_saveable_objects
def objects_ids_and_slot_variables(self):
"""Traverse the object graph and list all accessible objects.
Looks for `Trackable` objects which are dependencies of
`root_trackable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_trackable`
(i.e. if they would be saved with a checkpoint).
Returns:
A tuple of (trackable objects, object -> node id, slot variables)
"""
trackable_objects, path_to_root = self._breadth_first_traversal()
object_names = object_identity.ObjectIdentityDictionary()
for obj, path in path_to_root.items():
object_names[obj] = _object_prefix_from_path(path)
node_ids = object_identity.ObjectIdentityDictionary()
for node_id, node in enumerate(trackable_objects):
node_ids[node] = node_id
slot_variables = _serialize_slot_variables(
trackable_objects=trackable_objects,
node_ids=node_ids,
object_names=object_names)
return trackable_objects, node_ids, slot_variables
def list_objects(self):
"""Traverse the object graph and list all accessible objects."""
trackable_objects, _, _ = self.objects_ids_and_slot_variables()
return trackable_objects
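# A sketch (illustrative names) tying the pieces above together: inspecting
# the checkpoint keys assigned to an arbitrary trackable `root`. Each
# SaveableObject name embeds its checkpoint key, e.g.
# "dense/kernel/.ATTRIBUTES/VARIABLE_VALUE".
def _example_inspect_keys(root):
  view = ObjectGraphView(root)
  saveables, graph_proto, _ = view.serialize_object_graph()
  return [s.name for s in saveables], len(graph_proto.nodes)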
|
tensorflow-master
|
tensorflow/python/training/tracking/graph_view.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import weakref
from absl.testing import parameterized
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class InterfaceTests(test.TestCase):
def testLayerDeduplication(self):
model = training.Model()
layer_one = core.Dense(1)
layer_two = core.Dense(1)
model.other_path = [layer_one, layer_two]
model.l2 = layer_two
model.l1 = layer_one
self.assertEqual([layer_one, layer_two], model.layers)
def testSaveWithOnlyKerasSession(self):
with ops.Graph().as_default():
inp = input_layer.Input([1])
dense = core.Dense(1)(inp)
model = training.Model(inp, dense)
model.compile(optimizer="sgd", loss="mse")
model.fit([1.], [2.])
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testAddVariable(self):
obj = NonLayerTrackable()
with self.assertRaisesRegexp(ValueError, "do not specify shape"):
trackable_utils.add_variable(
obj, name="shape_specified_twice", shape=[], initializer=1)
constant_initializer = trackable_utils.add_variable(
obj, name="constant_initializer", initializer=1)
with variable_scope.variable_scope("some_variable_scope"):
ones_initializer = trackable_utils.add_variable(
obj,
name="ones_initializer",
shape=[2],
initializer=init_ops.ones_initializer(dtype=dtypes.float32))
bare_initializer = trackable_utils.add_variable(
obj,
name="bare_initializer",
shape=[2, 2],
dtype=dtypes.float64,
initializer=init_ops.zeros_initializer)
# Even in graph mode, there are no naming conflicts between objects, only
# naming conflicts within an object.
other_duplicate = resource_variable_ops.ResourceVariable(
name="duplicate", initial_value=1.)
duplicate = trackable_utils.add_variable(
obj, name="duplicate", shape=[])
with self.assertRaisesRegexp(ValueError, "'duplicate'.*already declared"):
trackable_utils.add_variable(obj, name="duplicate", shape=[])
self.evaluate(trackable_utils.gather_initializers(obj))
self.assertEqual("constant_initializer:0", constant_initializer.name)
self.assertEqual(1, self.evaluate(constant_initializer))
self.assertEqual("some_variable_scope/ones_initializer:0",
ones_initializer.name)
self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
self.assertAllEqual([[0., 0.],
[0., 0.]], self.evaluate(bare_initializer))
self.assertEqual("a_variable:0", obj.a_variable.name)
self.assertEqual("duplicate:0", other_duplicate.name)
if context.executing_eagerly():
      # When executing eagerly, variable names are not uniquified, so .name
      # stays "duplicate:0"; the checkpoint key is unaffected in either mode.
self.assertEqual("duplicate:0", duplicate.name)
else:
# The .name attribute may be globally influenced, but the checkpoint name
# won't be (tested below).
self.assertEqual("duplicate_1:0", duplicate.name)
named_variables, _, _ = (
graph_view.ObjectGraphView(obj).serialize_object_graph())
expected_checkpoint_names = (
"a_variable/.ATTRIBUTES/VARIABLE_VALUE",
"bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
"duplicate/.ATTRIBUTES/VARIABLE_VALUE",
"ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
)
six.assertCountEqual(
self, expected_checkpoint_names, [v.name for v in named_variables])
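  # Checkpoint keys are derived from the object path plus an attribute
  # suffix ("<path>/.ATTRIBUTES/VARIABLE_VALUE" for plain variables), which
  # is why the expected names above are independent of graph-mode name
  # uniquification: "duplicate_1:0" still saves under "duplicate/...".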
def testInitNotCalled(self):
class NoInit(tracking.AutoTrackable):
def __init__(self):
pass
# __init__ for Trackable will be called implicitly.
trackable_utils.add_variable(NoInit(), "var", shape=[])
def testShapeDtype(self):
root = tracking.AutoTrackable()
v1 = trackable_utils.add_variable(
root, name="v1", initializer=3., dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v1.dtype)
v2 = trackable_utils.add_variable(
root,
name="v2",
shape=[3],
initializer=init_ops.ones_initializer,
dtype=dtypes.float64)
self.assertEqual(dtypes.float64, v2.dtype)
self.assertAllEqual([1., 1., 1.], self.evaluate(v2))
def testObjectMetadata(self):
with context.eager_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
dense = core.Dense(1)
checkpoint = trackable_utils.Checkpoint(dense=dense)
dense(constant_op.constant([[1.]]))
save_path = checkpoint.save(checkpoint_prefix)
objects = trackable_utils.object_metadata(save_path)
all_variable_names = []
for obj in objects.nodes:
for attribute in obj.attributes:
all_variable_names.append(attribute.full_name)
self.assertIn("dense/kernel", all_variable_names)
def testNotTrackable(self):
class CallsFunctionalStuff(
tracking.NotTrackable, tracking.AutoTrackable):
pass
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
checkpoint = trackable_utils.Checkpoint(x=CallsFunctionalStuff())
with self.assertRaises(NotImplementedError):
checkpoint.save(prefix)
class CallsFunctionalStuffOtherMRO(
tracking.AutoTrackable, tracking.NotTrackable):
pass
checkpoint_reversed = trackable_utils.Checkpoint(
x=CallsFunctionalStuffOtherMRO())
with self.assertRaises(NotImplementedError):
checkpoint_reversed.save(prefix)
class _MirroringSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
tensor = self._primary_variable.read_value()
spec = saver_lib.BaseSaverBuilder.SaveSpec(
tensor=tensor,
slice_spec="",
name=name)
super(_MirroringSaveable, self).__init__(
tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class _OwnsMirroredVariables(base.Trackable):
"""A Trackable object which returns a more complex SaveableObject."""
def __init__(self):
self.non_dep_variable = variable_scope.get_variable(
name="non_dep_variable", initializer=6., use_resource=True)
self.mirrored = variable_scope.get_variable(
name="mirrored", initializer=15., use_resource=True)
def _gather_saveables_for_checkpoint(self):
def _saveable_factory(name=self.non_dep_variable.name):
return _MirroringSaveable(
primary_variable=self.non_dep_variable,
mirrored_variable=self.mirrored,
name=name)
return {base.VARIABLE_VALUE_KEY: _saveable_factory}
# The Saver sorts by name before parsing, so we need a name property.
@property
def name(self):
return self.non_dep_variable.name
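# _gather_saveables_for_checkpoint lets a Trackable substitute a custom
# SaveableObject for the default per-variable saveable. A minimal sketch of
# the same factory pattern (illustrative only; the class above uses a local
# function with a default `name` argument instead):
#
#   def _gather_saveables_for_checkpoint(self):
#     return {base.VARIABLE_VALUE_KEY:
#                 functools.partial(_MirroringSaveable,
#                                   primary_variable=self.non_dep_variable,
#                                   mirrored_variable=self.mirrored)}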
class CheckpointingTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.Adam(0.001)
step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, step=step)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = control_flow_ops.group(
optimizer.apply_gradients(zip(gradients, variables)),
step.assign_add(1))
with backprop.GradientTape() as tape:
loss = other_model(input_value)
variables = other_model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
root_trackable).serialize_object_graph()
expected_slot_keys = (
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
"optimizer/learning_rate",
"optimizer/beta_1",
"optimizer/beta_2",
"optimizer/iter",
"optimizer/decay",
) + expected_slot_keys
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual("Adam/beta_1",
named_variables["optimizer/beta_1" + suffix].full_name)
self.assertEqual("Adam/beta_2",
named_variables["optimizer/beta_2" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
children = [node.local_name for node in optimizer_node.children]
six.assertCountEqual(
self,
# hyper variable dependencies
["beta_1", "beta_2", "iter", "decay", "learning_rate"],
children)
serialized_slot_keys = []
for slot in optimizer_node.slot_variables:
for attribute in (
serialized_graph.nodes[slot.slot_variable_node_id].attributes):
serialized_slot_keys.append(attribute.checkpoint_key)
six.assertCountEqual(
self,
[key + suffix for key in expected_slot_keys],
serialized_slot_keys)
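  # Slot variables are keyed as
  # "<variable path>/.OPTIMIZER_SLOT/<optimizer path>/<slot name>", i.e. a
  # slot lives with the variable it belongs to rather than with the
  # optimizer. This is what lets slots be restored even when the optimizer
  # is created after the variables (see testDeferredSlotRestoration below).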
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturned(self):
v = _OwnsMirroredVariables()
checkpoint = trackable_utils.Checkpoint(v=v)
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
self.evaluate(v.non_dep_variable.assign(42.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
self.evaluate(v.non_dep_variable.assign(44.))
save_path = checkpoint.save(prefix)
self.evaluate(v.non_dep_variable.assign(45.))
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
self.assertEqual(44., self.evaluate(v.non_dep_variable))
self.assertEqual(44., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testMoreComplexSaveableReturnedWithGlobalName(self):
# The same object can also be saved using the name-based saver.
v = _OwnsMirroredVariables()
saver = saver_lib.Saver(var_list=[v])
test_dir = self.get_temp_dir()
prefix = os.path.join(test_dir, "ckpt")
with self.cached_session() as sess:
self.evaluate(v.non_dep_variable.assign(42.))
save_path = saver.save(sess, prefix)
self.evaluate(v.non_dep_variable.assign(43.))
self.evaluate(v.mirrored.assign(44.))
saver.restore(sess, save_path)
self.assertEqual(42., self.evaluate(v.non_dep_variable))
self.assertEqual(42., self.evaluate(v.mirrored))
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.assertFalse(root_trackable.save_counter.trainable)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(
sorted(optimizer.variables(), key=lambda v: v.name))
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_trackable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.Adam(0.001)
on_create_root = trackable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value,
var_list=[dummy_var])
status.assert_existing_objects_matched()
status.assert_consumed()
self.assertAllEqual(
optimizer_variables,
# Creation order is different, so .variables() needs to be re-sorted.
self.evaluate(sorted(optimizer.variables(), key=lambda v: v.name)))
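  # Two restoration modes are exercised above: immediate restoration, where
  # the variables already exist and run_restore_ops() applies the saved
  # values, and deferred restoration, where values are held and assigned as
  # matching objects and variables are created. Restore-on-create only
  # happens when executing eagerly; graph mode surfaces the same values
  # through run_restore_ops() once the objects exist.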
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer.iterations.numpy())
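  # The loop above is the idiomatic eager checkpointing pattern. A sketch
  # using the public API (tf.train.Checkpoint is the exported name for
  # trackable_utils.Checkpoint; the directory is a placeholder):
  #
  #   ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
  #   ckpt.restore(tf.train.latest_checkpoint("/tmp/ckpts"))  # no-op if None
  #   for batch in dataset:
  #     train_step(batch)
  #   ckpt.save("/tmp/ckpts/ckpt")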
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.CheckpointV1(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
def _train_fn(model, input_value):
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(_train_fn, model, input_value)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
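  # CheckpointManager layers numbering and deletion on top of Checkpoint:
  # with max_to_keep=1 above, older "ckpt-N" files are deleted on each
  # save(), and manager.latest_checkpoint replaces the manual
  # checkpoint_management.latest_checkpoint(directory) lookup.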
@test_util.run_in_graph_and_eager_modes
def testFreezing(self):
with test_util.use_gpu():
# Save an object-based checkpoint using a frozen saver
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
self.evaluate(v.assign(3))
# Create the save counter so assert_consumed doesn't complain about it not
# existing in the checkpoint on restore.
self.evaluate(checkpoint.save_counter.assign(12))
saver = trackable_utils.frozen_saver(checkpoint)
with ops.device("cpu:0"):
prefix_tensor = constant_op.constant(prefix)
self.evaluate(saver.save(prefix_tensor))
self.evaluate(v.assign(10))
# Use the frozen saver to restore the same object graph
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore using another frozen saver on an identical object graph
del v, checkpoint, saver
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(v=v)
saver = trackable_utils.frozen_saver(checkpoint)
self.evaluate(saver.restore(prefix_tensor))
self.assertEqual(3, self.evaluate(v))
# Restore as an object-based checkpoint
del v, checkpoint, saver
checkpoint = trackable_utils.Checkpoint()
status = checkpoint.restore(prefix)
v = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
if context.executing_eagerly():
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
self.assertEqual(0, self.evaluate(v))
checkpoint.v = v
status.assert_consumed().run_restore_ops()
self.assertEqual(3, self.evaluate(v))
self.assertEqual(12, self.evaluate(checkpoint.save_counter))
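  # frozen_saver captures the object graph as it exists at call time, so
  # attributes attached afterwards are not saved or restored by it; it only
  # needs an identical object graph on restore. The plain Checkpoint.restore
  # path (third block above) additionally tolerates objects attached after
  # restore() via deferred restoration.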
@test_util.run_in_graph_and_eager_modes
def testCustomNumbering(self):
directory = self.get_temp_dir()
prefix = os.path.join(directory, "ckpt")
step = resource_variable_ops.ResourceVariable(0, dtype=dtypes.int64)
checkpoint = trackable_utils.Checkpoint(step=step)
self.evaluate(step.initializer)
for i in range(5):
path = checkpoint.write("%s-%d" % (prefix, self.evaluate(step)))
expected_suffix = "-%d" % (2 * i,)
if not path.endswith(expected_suffix):
self.fail("%s should have suffix %s" % (path, expected_suffix))
self.evaluate(step.assign_add(2))
def testPartialRestoreWarningObject(self):
with context.eager_mode():
optimizer = adam.Adam(0.0)
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.),
optimizer=optimizer)
# Create a slot variable to save
optimizer.minimize(original_root.v1.read_value, [original_root.v1])
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path)
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v2'", messages)
self.assertIn("(root).optimizer's state 'm' for (root).v1", messages)
self.assertNotIn("(root).v1'", messages)
self.assertIn("expect_partial()", messages)
def testPartialRestoreWarningAttribute(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
with test.mock.patch.object(logging, "warning") as mock_log:
# Note: Unlike in testPartialRestoreWarningObject, the warning actually
# prints immediately here, since all of the objects have been created
# and there's no deferred restoration sitting around.
partial_root.restore(save_path)
self.assertEqual(3., partial_root.v2.numpy())
del partial_root
self.assertIsNone(weak_partial_root())
messages = str(mock_log.call_args_list)
self.assertIn("(root).v1", messages)
self.assertNotIn("(root).v2", messages)
self.assertIn("expect_partial()", messages)
def testAttributeException(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=base.Trackable(),
v2=variables_lib.Variable(0.))
status = partial_root.restore(save_path)
with self.assertRaisesRegexp(
AssertionError,
r"Unused attributes(.|\n)*\(root\).v1"):
status.assert_consumed()
def testSilencePartialWarning(self):
with context.eager_mode():
original_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(2.),
v2=variables_lib.Variable(3.))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_path = original_root.save(prefix)
partial_root = trackable_utils.Checkpoint(v1=variables_lib.Variable(0.))
weak_partial_root = weakref.ref(partial_root)
weak_v1 = weakref.ref(partial_root.v1)
partial_root.restore(save_path).expect_partial()
self.assertEqual(2., partial_root.v1.numpy())
with test.mock.patch.object(logging, "warning") as mock_log:
del partial_root
self.assertIsNone(weak_partial_root())
self.assertIsNone(weak_v1())
self.assertEmpty(mock_log.call_args_list)
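  # expect_partial() is the supported way to silence the unused-attribute
  # warning when a checkpoint is deliberately loaded into a smaller object
  # graph, e.g.:
  #
  #   status = checkpoint.restore(save_path).expect_partial()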
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.Adam(0.)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables))
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(optimizer.iterations))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testVariableNameEscaping(self):
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))
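  # The escaping keeps user-supplied names out of the reserved
  # "/.ATTRIBUTES/..." namespace: "/" becomes ".S" and "." becomes "..", so
  # r"d/.ATTRIBUTES/f" maps to r"d.S..ATTRIBUTES.Sf" and never contains a
  # literal "/.ATTRIBUTES" segment.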
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNumberedPath(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
root.leaf = leaf
trackable_utils.add_variable(leaf, name="v", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", named_variable.name)
@test_util.run_in_graph_and_eager_modes
def testLocalNameValidation(self):
root = tracking.AutoTrackable()
leaf = tracking.AutoTrackable()
# Dots are escaped, which avoids conflicts with reserved names.
root._track_trackable(leaf, name=".ATTRIBUTES")
trackable_utils.add_variable(trackable=leaf, name="a", shape=[])
(named_variable,), _, _ = graph_view.ObjectGraphView(
root).serialize_object_graph()
self.assertEqual("..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE",
named_variable.name)
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.Adam(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = trackable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testLateDependencyTracking(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class LateDependencies(trackable_utils.Checkpoint):
def add_dep(self):
self.dep = Dependency()
self.dep.build()
original = LateDependencies()
original.add_dep()
self.evaluate(state_ops.assign(original.dep.var, 123.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = original.save(checkpoint_prefix)
load_into = LateDependencies()
status = load_into.restore(save_path)
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
load_into.add_dep()
status.assert_consumed()
status.assert_existing_objects_matched().run_restore_ops()
self.assertEqual(123., self.evaluate(load_into.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDepAfterVar(self):
class Dependency(tracking.AutoTrackable):
def build(self):
self.var = trackable_utils.add_variable(
self, "var", initializer=0.)
class DepAfterVar(trackable_utils.Checkpoint):
def add_dep(self):
dep = Dependency()
dep.build()
self.dep = dep
dep_after_var = DepAfterVar()
dep_after_var.add_dep()
self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = dep_after_var.save(checkpoint_prefix)
loaded_dep_after_var = DepAfterVar()
status = loaded_dep_after_var.restore(save_path)
loaded_dep_after_var.add_dep()
status.assert_consumed()
status.run_restore_ops()
self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var))
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = trackable_utils.Checkpoint()
root.var = trackable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.Adam(0.1)
variables = [root.var]
gradients = [1.]
train_op = optimizer.apply_gradients(zip(gradients, variables))
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(trackable_utils.gather_initializers(
trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(
optimizer.get_slot(slot_name="m", var=root.var),
14.))
slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
new_root = trackable_utils.Checkpoint()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = new_root.restore(slots_path)
no_slot_status = new_root.restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = trackable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.Adam(0.1)
slot_status.assert_existing_objects_matched()
if not context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "Unresolved object"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
else:
# Slot variables are not created eagerly when graph building.
with self.assertRaises(KeyError):
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)
variables = [new_root.var]
gradients = [1.]
train_op = new_root.optimizer.apply_gradients(zip(gradients, variables))
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
if not context.executing_eagerly():
        # The train op hasn't run when graph building, so the slot variable
        # still has its restored value. Under eager execution the op has
        # already run, so the value would differ.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(slot_name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
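  # Summary of the deferred-slot mechanics above: a slot's restoration is
  # queued against the (variable, optimizer, slot name) triple. Eagerly, the
  # slot variable is created with its restored value; while graph building,
  # apply_gradients() creates the slot and a later run_restore_ops() picks
  # up the queued restore op for it.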
@test_util.run_in_graph_and_eager_modes
def testOverlappingRestores(self):
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep = tracking.AutoTrackable()
save_root.dep.var = trackable_utils.add_variable(
save_root.dep, name="var", initializer=0.)
self.evaluate(state_ops.assign(save_root.dep.var, 12.))
first_path = save_root.save(os.path.join(checkpoint_directory, "first"))
self.evaluate(state_ops.assign(save_root.dep.var, 13.))
second_path = save_root.save(os.path.join(checkpoint_directory, "second"))
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
first_status = first_root.restore(first_path)
second_status = second_root.restore(second_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(13., self.evaluate(load_dep.var))
# Try again with the order of the restore() reversed. The last restore
# determines the final value.
first_root = trackable_utils.Checkpoint()
second_root = trackable_utils.Checkpoint()
second_status = second_root.restore(second_path)
first_status = first_root.restore(first_path)
load_dep = tracking.AutoTrackable()
load_dep.var = trackable_utils.add_variable(
load_dep, name="var", shape=[])
first_root.dep = load_dep
first_status.assert_consumed()
first_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
second_root.dep = load_dep
second_status.assert_consumed()
second_status.run_restore_ops()
self.assertEqual(12., self.evaluate(load_dep.var))
@test_util.run_in_graph_and_eager_modes
def testAmbiguousLoad(self):
# Not OK to split one checkpoint object into two
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
dep_three = tracking.AutoTrackable()
save_root.dep_one.dep_three = dep_three
save_root.dep_two.dep_three = dep_three
trackable_utils.add_variable(dep_three, name="var", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
status = load_root.restore(save_path)
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = tracking.AutoTrackable()
load_root.dep_one.dep_three = tracking.AutoTrackable()
load_root.dep_two.dep_three = tracking.AutoTrackable()
trackable_utils.add_variable(
load_root.dep_one.dep_three, name="var", initializer=0.)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
@test_util.run_in_graph_and_eager_modes
def testObjectsCombined(self):
# Currently fine to load two checkpoint objects into one Python object
checkpoint_directory = self.get_temp_dir()
save_root = trackable_utils.Checkpoint()
save_root.dep_one = tracking.AutoTrackable()
save_root.dep_two = tracking.AutoTrackable()
trackable_utils.add_variable(
save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
trackable_utils.add_variable(
save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
self.evaluate(trackable_utils.gather_initializers(save_root))
save_path = save_root.save(os.path.join(checkpoint_directory, "ckpt"))
load_root = trackable_utils.Checkpoint()
load_root.dep_one = tracking.AutoTrackable()
load_root.dep_two = load_root.dep_one
v1 = trackable_utils.add_variable(
load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
v2 = trackable_utils.add_variable(
load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
status = load_root.restore(
save_path).assert_consumed().assert_existing_objects_matched()
status.run_restore_ops()
self.assertEqual(32., self.evaluate(v1))
self.assertEqual(64., self.evaluate(v2))
@test_util.run_in_graph_and_eager_modes
def testDependencyLoop(self):
# Note: this test creates garbage during eager execution because it
# purposefully creates a reference cycle.
first = trackable_utils.Checkpoint()
second = trackable_utils.Checkpoint()
first.second = second
second.first = first
first.v = trackable_utils.add_variable(
first, "v1", initializer=[3., 1., 4.])
second.v = trackable_utils.add_variable(
second, "v2", initializer=[1., 1., 2., 3.])
self.evaluate(trackable_utils.gather_initializers(first))
checkpoint_directory = self.get_temp_dir()
save_path = first.save(os.path.join(checkpoint_directory, "ckpt"))
# Test deferred loading
first_load = trackable_utils.Checkpoint()
status = first_load.restore(save_path)
second_load = tracking.AutoTrackable()
first_load.second = second_load
second_load.first = first_load
with self.assertRaises(AssertionError):
status.assert_consumed()
first_load.v = trackable_utils.add_variable(
first_load, "v1", shape=[3])
second_load.v = trackable_utils.add_variable(
second_load, "v2", shape=[4])
status.assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
# Test loading when variables have already been created
self.evaluate(first_load.v.assign([2., 7., 1.]))
self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
status = first_load.restore(save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
@test_util.run_in_graph_and_eager_modes
def testRestoreOnAssign(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first = trackable_utils.Checkpoint()
first.var1 = variables_lib.Variable(0., name="outside_var")
first.var2 = variables_lib.Variable(0., name="blah")
self.evaluate(first.var1.assign(4.))
self.evaluate(first.var2.assign(8.))
save_path = first.save(checkpoint_prefix)
second = trackable_utils.Checkpoint()
second.var2 = variables_lib.Variable(0., name="blah")
status = second.restore(save_path)
recreated_var1 = variables_lib.Variable(0., name="outside_var")
status.run_restore_ops()
self.assertEqual(8., self.evaluate(second.var2))
self.evaluate(recreated_var1.assign(-2.))
self.assertEqual(-2., self.evaluate(recreated_var1))
second.var1 = recreated_var1
status.run_restore_ops()
self.assertEqual(4., self.evaluate(recreated_var1))
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
obj.save(checkpoint_prefix)
graph.finalize()
obj.save(checkpoint_prefix)
@test_util.run_in_graph_and_eager_modes
def testCheckpointState(self):
# No checkpoints are deleted by default
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
saver = trackable_utils.Checkpoint(obj=obj)
for _ in range(10):
saver.save(checkpoint_prefix)
expected_filenames = ["checkpoint"]
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
@test_util.run_in_graph_and_eager_modes
def testCheckpointStateChangingVarList(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.AutoTrackable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
self.evaluate(trackable_utils.gather_initializers(obj))
checkpoint = trackable_utils.Checkpoint(obj=obj)
looped_variables = []
for iteration in range(10):
new_variable = resource_variable_ops.ResourceVariable(iteration)
self.evaluate(new_variable.initializer)
setattr(checkpoint, "var_%d" % iteration, new_variable)
checkpoint.save(checkpoint_prefix)
looped_variables.append(new_variable)
expected_filenames = ["checkpoint"]
# We've copied the saver each time, but checkpoint management should still
# be consistent. Nothing gets deleted.
for checkpoint_number in range(1, 11):
expected_filenames.append("ckpt-%d.index" % (checkpoint_number,))
self.assertEmpty(
set(expected_filenames)
- set(os.listdir(checkpoint_directory)))
self.assertEqual(
checkpoint_prefix + "-10",
checkpoint_management.latest_checkpoint(checkpoint_directory))
# The checkpoint list only contains the most recent checkpoint, but they're
# all on disk. This means we won't eventually run into proto size limits.
self.assertEqual(
[checkpoint_prefix + "-10"],
(checkpoint_management.get_checkpoint_state(checkpoint_directory)
.all_model_checkpoint_paths))
for v in looped_variables:
self.evaluate(v.assign(314))
checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops()
self.assertEqual(314, self.evaluate(checkpoint.var_9))
self.assertEqual(314, self.evaluate(checkpoint.var_8))
self.assertEqual(314, self.evaluate(checkpoint.var_6))
self.assertEqual(5, self.evaluate(checkpoint.var_5))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops()
self.assertEqual(9, self.evaluate(checkpoint.var_9))
self.assertEqual(8, self.evaluate(checkpoint.var_8))
self.assertEqual(1, self.evaluate(checkpoint.var_1))
self.assertEqual(0, self.evaluate(checkpoint.var_0))
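  # Checkpoint.save() updates the CheckpointState proto (the "checkpoint"
  # file) and increments the save counter on every call, while write() (see
  # testCustomNumbering above) only writes the data files and leaves
  # numbering and bookkeeping to the caller or to CheckpointManager.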
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variables_lib.Variable(0., name="v")
obj.opt = adam.Adam(0.1)
variables = [obj.var]
gradients = [1.]
obj.opt.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(obj))
save_path = obj.save(checkpoint_prefix)
obj.restore(save_path)
graph.finalize()
obj.restore(save_path)
@test_util.run_in_graph_and_eager_modes
def test_sequential(self):
model = sequential.Sequential()
checkpoint = trackable_utils.Checkpoint(model=model)
model.add(core.Dense(4))
second_dense = core.Dense(5)
model.add(second_dense)
model(constant_op.constant([[1.]]))
checkpoint.restore(None).initialize_or_restore()
self.evaluate(second_dense.bias.assign(
constant_op.constant([1., 2., 3., 4., 5.])))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
self.evaluate(second_dense.bias.assign(
constant_op.constant([5., 6., 7., 8., 9.])))
checkpoint.restore(save_path).assert_consumed().run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.], self.evaluate(second_dense.bias))
deferred_sequential = sequential.Sequential()
deferred_sequential_checkpoint = trackable_utils.Checkpoint(
model=deferred_sequential)
status = deferred_sequential_checkpoint.restore(save_path)
deferred_sequential.add(core.Dense(4))
deferred_sequential(constant_op.constant([[1.]]))
deferred_second_dense = core.Dense(5)
deferred_sequential.add(deferred_second_dense)
deferred_sequential(constant_op.constant([[1.]]))
status.run_restore_ops()
self.assertAllEqual([1., 2., 3., 4., 5.],
self.evaluate(deferred_second_dense.bias))
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
model=model) # Do not save the optimizer with the checkpoint.
optimizer_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
def train_fn():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
# TODO(tanzheny): Add hyper variables to .variables(), and set them with
# set_weights etc.
variables_not_in_the_variables_property = [
obj for obj in optimizer._hyper.values()
if isinstance(obj, variables_lib.Variable)]
self.evaluate([v.initializer for v
in optimizer.variables()
+ variables_not_in_the_variables_property])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.beta_1.assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
del train_fn
# Restore into a graph with the optimizer
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
def train_fn1():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn1 = functools.partial(self.evaluate, train_fn1())
status.initialize_or_restore()
train_fn1()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
del train_fn1
# Make sure initialization doesn't clobber later restores
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.Adam(0.001, beta_1=1.0)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
opt_root = trackable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
def train_fn2():
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
return optimizer.apply_gradients(zip(gradients, variables))
if not context.executing_eagerly():
train_fn2 = functools.partial(self.evaluate, train_fn2())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn2()
self.assertEqual(42., self.evaluate(optimizer.beta_1))
@test_util.run_in_graph_and_eager_modes
def test_restore_after_adding_empty_trackable_data_structure(self):
model = NonLayerTrackable()
checkpoint = trackable_utils.Checkpoint(model=model)
checkpoint.restore(None).initialize_or_restore()
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = checkpoint.save(checkpoint_prefix)
del model, checkpoint
model = NonLayerTrackable()
model.dict = {"a": 1}
model.list = {"b": 1}
checkpoint = trackable_utils.Checkpoint(model=model)
load_status = checkpoint.restore(save_path)
load_status.assert_existing_objects_matched().run_restore_ops()
@test_util.run_in_graph_and_eager_modes
def test_write_checkpoint_from_function(self):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_checkpoint = trackable_utils.Checkpoint(
v=variables_lib.Variable(1.))
@def_function.function
def _write_checkpoint():
save_path = save_checkpoint.write(checkpoint_prefix)
return save_path
self.evaluate([save_checkpoint.v.initializer])
self.evaluate(_write_checkpoint())
load_checkpoint = trackable_utils.Checkpoint(
v=variables_lib.Variable(0.))
load_checkpoint.restore(checkpoint_prefix).run_restore_ops()
self.assertEqual(1., self.evaluate(load_checkpoint.v))
self.evaluate(save_checkpoint.v.assign(3.))
self.evaluate(_write_checkpoint())
self.evaluate(save_checkpoint.v.assign(0.))
load_checkpoint.restore(checkpoint_prefix).run_restore_ops()
self.assertEqual(3., self.evaluate(load_checkpoint.v))
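  # write() works inside a tf.function because it does no Python-side
  # bookkeeping (no save counter increment, no CheckpointState update) and
  # simply returns the save path; save() mutates Python state and is meant
  # to be called from plain Python, as in the other tests above.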
class _ManualScope(tracking.AutoTrackable):
def __call__(self):
with variable_scope.variable_scope("ManualScope") as vs:
self.variable_scope = vs
with trackable_utils.capture_dependencies(template=self):
return self._build()
def _build(self):
return variable_scope.get_variable(name="in_manual_scope", shape=[])
class TemplateTests(parameterized.TestCase, test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
six.assertCountEqual(
self,
[v1_save, v2_save, manual_scope, manual_scope_v, save_template],
trackable_utils.list_objects(save_template))
manual_dep, = manual_scope._checkpoint_dependencies
self.assertEqual("in_manual_scope", manual_dep.name)
self.assertIs(manual_scope_v, manual_dep.ref)
optimizer = adam.Adam(0.0)
save_root = trackable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value,
var_list=[v1_save])
self.evaluate([v.initializer for v in save_template.variables])
optimizer_variables = optimizer.variables() + list(
optimizer._hyper.values())
self.evaluate([v.initializer for v in optimizer_variables])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.Adam(0.0)
load_root = trackable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value, var_list=[var])
self.assertLen(load_template._checkpoint_dependencies, 3)
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
self.assertEqual("ManualScope",
load_template._checkpoint_dependencies[2].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore_nested(self):
def _inner_template():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer())
return v
def _outer_template():
first_inner = template.make_template("i1", _inner_template)
second_inner = template.make_template("i2", _inner_template)
v1 = first_inner()
v2 = second_inner()
v3 = second_inner()
return (first_inner, second_inner), (v1, v2, v3)
with variable_scope.variable_scope("ignored"):
save_template = template.make_template("s1", _outer_template)
save_root = trackable_utils.Checkpoint(my_template=save_template)
(inner_template_one, inner_template_two), _ = save_template()
self.evaluate(inner_template_one.variables[0].assign([20.]))
self.evaluate(inner_template_two.variables[0].assign([25.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _outer_template)
load_root = trackable_utils.Checkpoint(my_template=load_template)
status = load_root.restore(save_path)
(inner_template_one, inner_template_two), (v1, v2, v3) = load_template()
outer_template_dependencies = load_root.my_template._checkpoint_dependencies
self.assertLen(outer_template_dependencies, 2)
self.assertEqual("i1", outer_template_dependencies[0].name)
self.assertIs(inner_template_one, outer_template_dependencies[0].ref)
self.assertEqual("i2", outer_template_dependencies[1].name)
self.assertIs(inner_template_two, outer_template_dependencies[1].ref)
self.assertLen(inner_template_one._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name)
self.assertLen(inner_template_two._checkpoint_dependencies, 1)
self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([20.], self.evaluate(v1))
self.assertAllEqual([25.], self.evaluate(v2))
self.assertAllEqual([25.], self.evaluate(v3))
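  # make_template and capture_dependencies (used by _ManualScope above) both
  # record variables created inside their scope as checkpoint dependencies
  # named by the scope-relative variable name, which is why the nested
  # templates produce the "i1"/"i2" dependency structure with a "v" inside
  # each, checked here.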
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.Adam(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
with backprop.GradientTape() as tape:
loss = model(input_value)
variables = model.trainable_variables
gradients = tape.gradient(loss, variables)
train_op = optimizer.apply_gradients(zip(gradients, variables))
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, slot_name="m").assign([2.]))
self.evaluate(optimizer.beta_1.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")
.assign([102.]))
self.evaluate(root_trackable.optimizer.beta_1.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, slot_name="m")))
self.assertAllEqual(3.,
self.evaluate(root_trackable.optimizer.beta_1))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer.iterations)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = trackable_utils.TrackableSaver(
graph_view.ObjectGraphView(root))
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
status.assert_consumed()
status.assert_existing_objects_matched()
status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
status.assert_nontrivial_match()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable([1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object-based saving which use tf.train.* optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import six
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils
class NonLayerTrackable(tracking.AutoTrackable):
def __init__(self):
super(NonLayerTrackable, self).__init__()
self.a_variable = trackable_utils.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Trackables which aren't Layers.
self._non_layer = NonLayerTrackable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
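# --- Illustrative sketch, not part of the original file: the object-based
# save/restore pattern the tests below exercise. The function and its
# `example_prefix` argument are hypothetical names for illustration.
def _checkpoint_round_trip_example(example_prefix):
  model = MyModel()
  optimizer = adam.AdamOptimizer(0.001)
  root = trackable_utils.Checkpoint(optimizer=optimizer, model=model)
  save_path = root.save(file_prefix=example_prefix)
  # restore() matches saved values to objects by their attribute paths from
  # `root`, deferring restoration until variables are actually created.
  return root.restore(save_path)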
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
root_trackable).serialize_object_graph()
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["optimizer_step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta1_power",
named_variables["optimizer/beta1_power" + suffix].full_name)
self.assertEqual(
"beta2_power",
named_variables["optimizer/beta2_power" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power",
optimizer_node.children[0].local_name)
self.assertEqual("beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_trackable.save_counter # pylint: disable=pointless-statement
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_trackable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_trackable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_trackable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_trackable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
        # Preserve beta1_power and beta2_power when applying gradients so we can
# test that they've been restored correctly.
beta1=1.0, beta2=1.0)
on_create_root = trackable_utils.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
status.assert_nontrivial_match()
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_existing_objects_matched()
status.assert_consumed()
beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testEagerDistributionStrategy(self):
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model):
input_value = constant_op.constant([[3.]])
optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step)
for training_continuation in range(3):
strategy = mirrored_strategy.MirroredStrategy()
with strategy.scope():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
for _ in range(num_training_steps):
strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testGraphDistributionStrategy(self):
self.skipTest("b/121381184")
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
def _train_fn(optimizer, model):
input_value = constant_op.constant([[3.]])
return optimizer.minimize(
functools.partial(model, input_value),
global_step=root.optimizer_step)
for training_continuation in range(3):
with ops.Graph().as_default():
strategy = mirrored_strategy.MirroredStrategy()
with strategy.scope():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
status = root.restore(checkpoint_management.latest_checkpoint(
checkpoint_directory))
train_op = strategy.extended.call_for_each_replica(
functools.partial(_train_fn, optimizer, model))
with self.session() as session:
if training_continuation > 0:
status.assert_consumed()
status.initialize_or_restore()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.CheckpointV1(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
with self.session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
else:
status.assert_consumed()
status.assert_existing_objects_matched()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
manager = checkpoint_management.CheckpointManager(
root, checkpoint_directory, max_to_keep=1)
status = root.restore(save_path=manager.latest_checkpoint)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
manager.save()
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@def_function.function
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def _get_checkpoint_name(self, name):
root = tracking.AutoTrackable()
trackable_utils.add_variable(
root, name=name, shape=[1, 2], dtype=dtypes.float64)
(named_variable,), _, _ = trackable_utils._serialize_object_graph(
root, saveables_cache=None)
with ops.name_scope("root/" + named_variable.name):
pass # Make sure we can use this as an op name if we prefix it.
return named_variable.name
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = trackable_utils.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = trackable_utils.Checkpoint()
root.var = trackable_utils.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(trackable_utils.gather_initializers(
trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
new_root = trackable_utils.Checkpoint()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = new_root.restore(slots_path)
no_slot_status = new_root.restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = trackable_utils.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
slot_status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(trackable_utils.gather_initializers(obj))
obj.save(checkpoint_prefix)
before_ops = graph.get_operations()
obj.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = trackable_utils.Checkpoint()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(trackable_utils.gather_initializers(obj))
save_path = obj.save(checkpoint_prefix)
obj.restore(save_path)
before_ops = graph.get_operations()
obj.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(trackable_utils.gather_initializers(
first_root_trackable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_trackable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(6.))
save_path = second_root_trackable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
status = second_root_trackable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
@test_util.run_in_graph_and_eager_modes
def test_initialize_if_not_restoring(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer_only_prefix = os.path.join(checkpoint_directory, "opt")
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
model=model, # Do not save the optimizer with the checkpoint.
global_step=training_util.get_or_create_global_step())
optimizer_checkpoint = trackable_utils.Checkpoint(
optimizer=optimizer)
checkpoint_path = checkpoint_management.latest_checkpoint(
checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
self.evaluate([v.initializer for v in optimizer.variables()])
train_fn()
model_save_path = root.save(file_prefix=checkpoint_prefix)
self.evaluate(optimizer.variables()[0].assign(42.))
optimizer_save_path = optimizer_checkpoint.save(optimizer_only_prefix)
# Restore into a graph with the optimizer
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
status = root.restore(save_path=model_save_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
train_fn()
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
with self.assertRaises(AssertionError):
status.assert_consumed()
# Make sure initialization doesn't clobber later restores
with test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001, beta1=1.0)
root = trackable_utils.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
opt_root = trackable_utils.Checkpoint(
optimizer=optimizer)
status = root.restore(save_path=model_save_path)
init_only_optimizer_status = opt_root.restore(save_path=None)
optimizer_status = opt_root.restore(save_path=optimizer_save_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
optimizer_status.run_restore_ops()
status.initialize_or_restore()
init_only_optimizer_status.initialize_or_restore()
train_fn()
self.assertEqual(42., self.evaluate(optimizer.variables()[0]))
class _ManualScope(tracking.AutoTrackable):
def __call__(self):
with variable_scope.variable_scope("ManualScope") as vs:
self.variable_scope = vs
with trackable_utils.capture_dependencies(template=self):
return self._build()
def _build(self):
return variable_scope.get_variable(name="in_manual_scope", shape=[])
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_trackable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
manual = _ManualScope()
return v, v + 1., v2, manual, manual()
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save, manual_scope, manual_scope_v = save_template()
six.assertCountEqual(
self,
[v1_save, v2_save, manual_scope, manual_scope_v, save_template],
trackable_utils.list_objects(save_template))
manual_dep, = manual_scope._checkpoint_dependencies
self.assertEqual("in_manual_scope", manual_dep.name)
self.assertIs(manual_scope_v, manual_dep.ref)
optimizer = adam.AdamOptimizer(0.0)
save_root = trackable_utils.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value)
self.evaluate([v.initializer for v in save_template.variables])
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.AdamOptimizer(0.0)
load_root = trackable_utils.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2, _, _ = load_template()
load_optimizer.minimize(var.read_value)
self.assertEqual(3, len(load_template._checkpoint_dependencies))
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
self.assertEqual("ManualScope",
load_template._checkpoint_dependencies[2].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
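# --- Illustrative sketch, not part of the original file: variables created
# inside a template become checkpoint dependencies of the template object,
# named by their scope-relative variable names (as the assertions above
# check). All names here are hypothetical.
def _template_tracking_example():
  tmpl = template.make_template(
      "example_scope",
      lambda: variable_scope.get_variable("v", shape=[], use_resource=True))
  root = trackable_utils.Checkpoint(my_template=tmpl)
  tmpl()  # Creates example_scope/v, checkpointed under my_template/v.
  return root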
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_trackable = trackable_utils.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(trackable_utils.gather_initializers(
root_trackable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_trackable
def _set_sentinels(self, root_trackable):
self.evaluate(root_trackable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_trackable):
self.assertAllEqual(
[1.], self.evaluate(root_trackable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_trackable.optimizer.get_slot(
var=root_trackable.model._named_dense.bias, name="m")))
beta1_power, _ = root_trackable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = saver_lib.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = trackable_utils.TrackableSaver(
graph_view.ObjectGraphView(root))
self._set_sentinels(root)
status = object_saver.restore(save_path)
      if context.executing_eagerly():
        self._check_sentinels(root)
        status.assert_consumed()
        status.assert_existing_objects_matched()
        status.assert_nontrivial_match()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_existing_objects_matched()
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_nontrivial_match()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# Check that there is no error when keys are missing from the name-based
# checkpoint.
root.not_in_name_checkpoint = resource_variable_ops.ResourceVariable([1.])
status = object_saver.restore(save_path)
with self.assertRaises(AssertionError):
status.assert_existing_objects_matched()
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
|
"""Utilities for including Python state in TensorFlow checkpoints."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import functools
import six
from tensorflow.python.training.tracking import base
from tensorflow.python.util.tf_export import tf_export
@tf_export("train.experimental.PythonState")
@six.add_metaclass(abc.ABCMeta)
class PythonState(base.Trackable):
"""A mixin for putting Python state in an object-based checkpoint.
This is an abstract class which allows extensions to TensorFlow's object-based
checkpointing (see `tf.train.Checkpoint`). For example a wrapper for NumPy
arrays:
```python
import io
import numpy
class NumpyWrapper(tf.train.experimental.PythonState):
def __init__(self, array):
self.array = array
def serialize(self):
string_file = io.BytesIO()
try:
numpy.save(string_file, self.array, allow_pickle=False)
serialized = string_file.getvalue()
finally:
string_file.close()
return serialized
def deserialize(self, string_value):
string_file = io.BytesIO(string_value)
try:
self.array = numpy.load(string_file, allow_pickle=False)
finally:
string_file.close()
```
Instances of `NumpyWrapper` are checkpointable objects, and will be saved and
restored from checkpoints along with TensorFlow state like variables.
```python
root = tf.train.Checkpoint(numpy=NumpyWrapper(numpy.array([1.])))
save_path = root.save(prefix)
root.numpy.array *= 2.
assert [2.] == root.numpy.array
root.restore(save_path)
assert [1.] == root.numpy.array
```
"""
@abc.abstractmethod
def serialize(self):
"""Callback to serialize the object. Returns a string."""
@abc.abstractmethod
def deserialize(self, string_value):
"""Callback to deserialize the object."""
def _gather_saveables_for_checkpoint(self):
"""Specify callbacks for saving and restoring `array`."""
return {
"py_state": functools.partial(
base.PythonStringStateSaveable,
state_callback=self.serialize,
restore_callback=self.deserialize)
}
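# --- Illustrative sketch, not part of the original module: a minimal
# concrete PythonState that checkpoints a single Python string. Only the
# serialize()/deserialize() contract documented above is assumed.
class _ExampleStringState(PythonState):
  """Example: a UTF-8 string saved alongside TensorFlow variables."""
  def __init__(self, value=""):
    self.value = value
  def serialize(self):
    # The save callback expects serialized bytes.
    return self.value.encode("utf-8")
  def deserialize(self, string_value):
    self.value = string_value.decode("utf-8")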
|
tensorflow-master
|
tensorflow/python/training/tracking/python_state.py
|
"""Trackable data structures."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import operator
import sys
import six
try:
import wrapt
except ImportError:
# Fall back to the build-time dependency if the system package is not available.
from .....third_party import wrapt
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import layer_utils
class NoDependency(object):
"""Allows attribute assignment to `Trackable` objects with no dependency.
Example usage:
```python
obj = Trackable()
obj.has_dependency = tf.Variable(0., name="dep")
obj.no_dependency = NoDependency(tf.Variable(1., name="nodep"))
assert obj.no_dependency.name == "nodep:0"
```
`obj` in this example has a dependency on the variable "dep", and both
attributes contain un-wrapped `Variable` objects.
`NoDependency` also works with `tf.keras.Model`, but only for checkpoint
dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)
`Layer` to the attribute without a checkpoint dependency, but the `Model` will
still track the `Layer` (so it will appear in `Model.layers`, and its
variables will appear in `Model.variables`).
"""
def __init__(self, value):
self.value = value
def _wrap_or_unwrap(value):
"""Wraps basic data structures, unwraps NoDependency objects."""
# pylint: disable=unidiomatic-typecheck
# Exact type checking to avoid mucking up custom logic in list/dict
# subclasses, e.g. collections.Counter.
if isinstance(value, NoDependency):
return value.value
if isinstance(value, base.Trackable):
return value # Skip conversion for already trackable objects.
elif type(value) == dict:
return _DictWrapper(value)
elif type(value) == collections.OrderedDict:
return _DictWrapper(value)
elif type(value) == list:
return ListWrapper(value)
else:
return value
# pylint: enable=unidiomatic-typecheck
# TODO(allenl): Handle other common data structures. Tuples will require
# special casing (tuple subclasses are not weak referenceable, so replacement
# with a wrapper that subclasses tuple on attribute assignment works poorly,
# and replacement with a wrapper that isn't a tuple is also problematic),
# probably a tree traversal where the leaves are non-tuples(/namedtuples) to
# come up with names. Dictionaries should look like lists.
def sticky_attribute_assignment(trackable, name, value):
"""Adds dependencies, generally called from __setattr__.
This behavior is shared between Trackable and Model.
Respects NoDependency indicators, but otherwise makes trackable objects
out of common data structures and tracks objects by their attribute names.
Args:
trackable: The object to add dependencies to (generally the one having
an attribute assigned).
name: The attribute name being assigned.
value: The value being assigned. Not necessarily a trackable object.
Returns:
The value which should be stored in the attribute (unwrapped from a
NoDependency object if necessary).
"""
if isinstance(value, NoDependency):
add_dependency = False
else:
add_dependency = True
value = _wrap_or_unwrap(value)
if not add_dependency:
return value
if isinstance(value, base.Trackable):
trackable._track_trackable( # pylint: disable=protected-access
value, name=name,
# Allow the user to switch the Trackable which is tracked by this
# name, since assigning a new variable to an attribute has
# historically been fine (e.g. Adam did this).
overwrite=True)
return value
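# --- Illustrative sketch, not part of the original module: how the wrapping
# and unwrapping above behave. The function name is hypothetical.
def _sticky_assignment_example():
  holder = base.Trackable()
  # A plain list is converted to a trackable ListWrapper (defined below)
  # and tracked under the attribute name.
  tracked = sticky_attribute_assignment(holder, "tracked", [])
  assert type(tracked) is not list
  # A NoDependency-wrapped list is unwrapped and left untracked.
  untracked = sticky_attribute_assignment(holder, "untracked", NoDependency([]))
  assert type(untracked) is list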
class _UntrackableError(ValueError):
def __init__(self, value): # pylint: disable=super-init-not-called
self._value = value
def __str__(self):
return (("Only trackable objects (such as Layers or Optimizers) may be "
"stored in a List object. Got %s, which does not inherit from "
"Trackable.") % (self._value,))
class TrackableDataStructure(base.Trackable):
"""Base class for data structures which contain trackable objects."""
def __init__(self):
# Attributes prefixed with "_self_" for compatibility with
# wrapt.ObjectProxy.
self._self_trainable = True
self._self_extra_variables = []
@property
def trainable(self):
return self._self_trainable
@trainable.setter
def trainable(self, value):
self._self_trainable = value
def _track_value(self, value, name):
"""Add a dependency on `value`."""
value = sticky_attribute_assignment(
trackable=self, value=value, name=name)
if isinstance(value, variables.Variable):
self._self_extra_variables.append(value)
if not isinstance(value, base.Trackable):
raise _UntrackableError(value)
if hasattr(value, "_use_resource_variables"):
# In subclassed models, legacy layers (tf.layers) must always use
# resource variables.
value._use_resource_variables = True # pylint: disable=protected-access
return value
@property
def _values(self):
"""An iterable/sequence which may contain trackable objects."""
raise NotImplementedError("Abstract method")
@property
def _layers(self):
"""All Layers and Layer containers, including empty containers."""
# Filter objects on demand so that wrapper objects use values from the thing
# they're wrapping if out of sync.
collected = []
for obj in self._values:
if (isinstance(obj, TrackableDataStructure)
or layer_utils.is_layer(obj)
or layer_utils.has_weights(obj)):
collected.append(obj)
return collected
@property
def layers(self):
return layer_utils.filter_empty_layer_containers(self._layers)
@property
def trainable_weights(self):
return layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._self_extra_variables)
@property
def non_trainable_weights(self):
return layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._self_extra_variables)
@property
def weights(self):
return self.trainable_weights + self.non_trainable_weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
@property
def variables(self):
return self.weights
@property
def updates(self):
"""Aggregate updates from any `Layer` instances."""
# Updates and conditional losses are forwarded as-is rather than being
# filtered based on inputs, since this is just a container and won't ever
# have any inputs.
aggregated = []
for layer in self.layers:
if hasattr(layer, "updates"):
aggregated += layer.updates
return aggregated
@property
def losses(self):
"""Aggregate losses from any `Layer` instances."""
aggregated = []
for layer in self.layers:
if hasattr(layer, "losses"):
aggregated += layer.losses
return aggregated
def __hash__(self):
# Support object-identity hashing, so these structures can be used as keys
# in sets/dicts.
return id(self)
def __eq__(self, other):
# Similar to Tensors, trackable data structures use object-identity
# equality to support set/dict membership.
return self is other
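# --- Illustrative sketch, not part of the original module: the minimal
# surface a concrete container implements is `_values`, calling
# `_track_value` on insertion; List and Mapping below do exactly this.
class _ExamplePair(TrackableDataStructure):
  """Example container tracking exactly two trackable values."""
  def __init__(self, first, second):
    super(_ExamplePair, self).__init__()
    self._first = self._track_value(first, name="first")
    self._second = self._track_value(second, name="second")
  @property
  def _values(self):
    return (self._first, self._second)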
class List(TrackableDataStructure, collections.Sequence):
"""An append-only sequence type which is trackable.
Maintains checkpoint dependencies on its contents (which must also be
trackable), and forwards any `Layer` metadata such as updates and losses.
Note that `List` is purely a container. It lets a `tf.keras.Model` or
other trackable object know about its contents, but does not call any
`Layer` instances which are added to it. To indicate a sequence of `Layer`
instances which should be called sequentially, use `tf.keras.Sequential`.
Example usage:
```python
class HasList(tf.keras.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = tf.contrib.checkpoint.List([layers.Dense(3)])
self.layer_list.append(layers.Dense(4))
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += tf.reduce_sum(x)
return aggregation
```
This kind of wrapping is necessary because `Trackable` objects do not
(yet) deeply inspect regular Python data structures, so for example assigning
a regular list (`self.layer_list = [layers.Dense(3)]`) does not create a
checkpoint dependency and does not add the `Layer` instance's weights to its
parent `Model`.
"""
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `list()`."""
super(List, self).__init__()
self._storage = self._make_storage(*args, **kwargs)
for index, element in enumerate(self._storage):
self._storage[index] = self._track_value(
element, name=self._name_element(index))
def copy(self):
return type(self)(copy.copy(self._storage))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return type(self)(copy.deepcopy(self._storage, memo))
def _make_storage(self, *args, **kwargs):
"""Determines the backing storage (overridden in subclasses)."""
return list(*args, **kwargs)
def _name_element(self, index):
return "%d" % (index,)
@property
def _values(self):
"""Collect values for TrackableDataStructure."""
return self
def append(self, value):
"""Add a new trackable value."""
value = self._track_value(value, self._name_element(len(self._storage)))
self._storage.append(value)
def extend(self, values):
"""Add a sequence of trackable values."""
for value in values:
self.append(value)
def __iadd__(self, values):
self.extend(values)
return self
def __add__(self, other):
return self.__class__(self._storage + getattr(other, "_storage", other))
def __imul__(self, y):
if y <= 0:
raise ValueError(
"List only supports append, multiplying in place by %d removes "
"elements." % y)
n = len(self._storage)
for _ in range(y - 1):
for i in range(n):
self.append(self._storage[i])
return self
def __mul__(self, n):
return self.__class__(self._storage * n)
def __rmul__(self, n):
return self * n
def __radd__(self, other):
return self.__class__(other) + self
def __getitem__(self, key):
return self._storage[key]
def __getslice__(self, i, j):
return self._storage[slice(i, j)]
def __len__(self):
return len(self._storage)
def __repr__(self):
return "List(%s)" % (repr(self._storage),)
def __sizeof__(self):
return super(List, self).__sizeof__() + sys.getsizeof(self._storage)
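# --- Illustrative sketch, not part of the original module: List is
# append-only and accepts trackable values exclusively.
def _list_example():
  tracked = List()
  tracked.append(base.Trackable())    # Tracked under the name "0".
  tracked.extend([base.Trackable()])  # Tracked under the name "1".
  try:
    tracked.append(42)  # Not a Trackable: raises _UntrackableError.
  except ValueError:
    pass
  return tracked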
# TODO(tomhennigan) Update to collections.UserList?
# TODO(allenl): Try switching this to wrapt.ObjectProxy again when we drop
# Python 3.4 support (may still be tricky).
class ListWrapper(List, collections.MutableSequence,
# Shadowed, but there for isinstance checks.
list):
"""Wraps the built-in `list` to support restore-on-create for variables.
Unlike `List`, this sequence type is mutable in the same ways built-in lists
are. Instead of throwing an error immediately like `List`, it records
problematic mutations (e.g. assigning a new element to a position already
occupied, meaning both elements get the same names at different times) and
refuses to save.
On assignment to an attribute of a Model or Trackable object, Python
lists are replaced with ListWrapper. Wrapping a list in a
`tf.contrib.checkpoint.NoDependency` object prevents this.
"""
def __init__(self, wrapped_list):
"""Construct a new list wrapper.
Args:
wrapped_list: The initial value of the data structure. A shallow copy may
be maintained for error checking. `wrapped_list` itself should not be
modified directly after constructing the `ListWrapper`, and if changes
are detected the `ListWrapper` will throw an exception on save.
"""
# Monotonic flags which indicate this object would not be restored properly,
# and therefore should throw an error on save to avoid giving the impression
# that restoring it will work.
self._non_append_mutation = False
self._external_modification = False
super(ListWrapper, self).__init__(wrapped_list)
self._last_wrapped_list_snapshot = list(self._storage)
# pylint: disable=protected-access
def __copy__(self):
copied = super(ListWrapper, self).__copy__()
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
return copied
def __deepcopy__(self, memo):
copied = super(ListWrapper, self).__deepcopy__(memo)
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
return copied
# pylint: enable=protected-access
def __reduce_ex__(self, protocol):
return (self.__class__,
(self._storage,))
def _make_storage(self, wrapped_list):
"""Use the user's original list for storage."""
return wrapped_list
def _check_external_modification(self):
"""Checks for any changes to the wrapped list not through the wrapper."""
if self._external_modification or self._non_append_mutation:
return
if self._storage != self._last_wrapped_list_snapshot:
self._external_modification = True
self._last_wrapped_list_snapshot = None
def _update_snapshot(self):
"""Acknowledges tracked changes to the wrapped list."""
if self._external_modification or self._non_append_mutation:
return
self._last_wrapped_list_snapshot = list(self._storage)
@property
def _checkpoint_dependencies(self):
self._check_external_modification()
if self._non_append_mutation:
raise ValueError(
("Unable to save the object %s (a list wrapper constructed to track "
"trackable TensorFlow objects). A list element was replaced "
"(__setitem__, __setslice__), deleted (__delitem__, __delslice__), "
"or moved (sort). In order to support restoration on object "
"creation, tracking is exclusively for append-only data structures."
"\n\nIf you don't need this list checkpointed, wrap it in a "
"tf.contrib.checkpoint.NoDependency object; it will be "
"automatically un-wrapped and subsequently ignored." % (self,)))
if self._external_modification:
raise ValueError(
("Unable to save the object %s (a list wrapper constructed to track "
"trackable TensorFlow objects). The wrapped list was modified "
"outside the wrapper (its final value was %s, its value when a "
"checkpoint dependency was added was %s), which breaks restoration "
"on object creation.\n\nIf you don't need this list checkpointed, "
"wrap it in a tf.contrib.checkpoint.NoDependency object; it will be "
"automatically un-wrapped and subsequently ignored." % (
self, self._storage, self._last_wrapped_list_snapshot)))
return super(ListWrapper, self)._checkpoint_dependencies
def __delitem__(self, key):
self._non_append_mutation = True
del self._storage[key]
def __setitem__(self, key, value):
self._check_external_modification()
if isinstance(key, slice):
# Note: this is quite inefficient, but the list API supports a broad range
# of slice setters (e.g. truncate, extend, replace) and imitating this
# for a range of Python versions is non-trivial.
storage_copy = list(self._storage)
self._storage[key] = value
len_before = len(storage_copy)
len_now = len(self._storage)
for i in range(max(len_before, len_now)):
value_now = self._storage[i] if i < len_now else None
value_before = storage_copy[i] if i < len_before else None
if isinstance(value_before, base.Trackable):
self._non_append_mutation = True
if value_now is not None and value_now != value_before:
self._storage[i] = self._track_value(self._storage[i],
self._name_element(i))
else:
if isinstance(self._storage[key], base.Trackable):
self._non_append_mutation = True
self._storage[key] = self._track_value(value, self._name_element(key))
self._update_snapshot()
def append(self, value):
"""Add a new trackable value."""
self._check_external_modification()
super(ListWrapper, self).append(value)
self._update_snapshot()
def extend(self, values):
"""Add a sequence of trackable values."""
self._check_external_modification()
super(ListWrapper, self).extend(values)
self._update_snapshot()
def __imul__(self, y):
if y <= 0:
      self._non_append_mutation = True
self._storage *= y
return self
# Relies on super() calling append, which updates the snapshot.
return super(ListWrapper, self).__imul__(y)
def __eq__(self, other):
return self._storage == getattr(other, "_storage", other)
def __ne__(self, other):
return self._storage != getattr(other, "_storage", other)
def __lt__(self, other):
return self._storage < getattr(other, "_storage", other)
def __le__(self, other):
return self._storage <= getattr(other, "_storage", other)
def __gt__(self, other):
return self._storage > getattr(other, "_storage", other)
def __ge__(self, other):
return self._storage >= getattr(other, "_storage", other)
def __hash__(self):
# List wrappers need to compare like regular lists, and so like regular
# lists they don't belong in hash tables.
raise TypeError("unhashable type: 'ListWrapper'")
def insert(self, index, obj):
self._non_append_mutation = True
self._storage.insert(index, obj)
def sort(self):
self._non_append_mutation = True
self._storage.sort()
def __setslice__(self, i, j, y):
self.__setitem__(slice(i, j), y)
def __delslice__(self, i, j):
self._non_append_mutation = True
del self._storage[slice(i, j)]
def _track_value(self, value, name):
"""Allows storage of non-trackable objects."""
try:
value = super(ListWrapper, self)._track_value(value=value, name=name)
except ValueError:
# Even if this value isn't trackable, we need to make sure
# NoDependency objects get unwrapped.
value = sticky_attribute_assignment(
trackable=self, value=value, name=name)
return value
def __repr__(self):
return "ListWrapper(%s)" % (repr(self._storage),)
def _list_functions_for_serialization(self, unused_functions):
return {
str(key): value for key, value in enumerate(self)
if _is_function(value)
}
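# --- Illustrative sketch, not part of the original module: mutations made
# to the wrapped list outside the wrapper are detected and block saving.
def _list_wrapper_example():
  inner = []
  wrapper = ListWrapper(inner)
  wrapper.append(base.Trackable())  # A tracked mutation: fine.
  inner.append("modified behind the wrapper's back")
  try:
    wrapper._checkpoint_dependencies  # pylint: disable=pointless-statement
  except ValueError:
    pass  # Raised because the snapshot no longer matches the storage.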
class Mapping(TrackableDataStructure, collections.Mapping):
"""An append-only trackable mapping data structure with string keys.
Maintains checkpoint dependencies on its contents (which must also be
trackable), named based on its keys.
Note that once a key has been added, it may not be deleted or replaced. If
names may not be unique, see `tf.contrib.checkpoint.UniqueNameTracker`.
"""
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `dict()`."""
super(Mapping, self).__init__()
self._storage = self._make_storage(*args, **kwargs)
self._storage.update(
{key: self._track_value(
value, name=self._name_element(key))
for key, value in self._storage.items()})
def __copy__(self):
return type(self)(copy.copy(self._storage))
def __deepcopy__(self, memo):
return type(self)(copy.deepcopy(self._storage, memo))
def _make_storage(self, *args, **kwargs):
return dict(*args, **kwargs)
@property
def _values(self):
"""Collect values for TrackableDataStructure."""
# Sort items deterministically by key
ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))
if ordered:
return ordered[1]
return []
def _name_element(self, key):
if not isinstance(key, six.string_types):
raise TypeError(
"Mapping accepts only string keys, but got a key %s."
% repr(key))
return str(key)
def __setitem__(self, key, value):
name = self._name_element(key)
value = self._track_value(value, name=name)
current_value = self._storage.setdefault(key, value)
if current_value is not value:
raise ValueError(
("Mappings are an append-only data structure. Tried to overwrite the "
"key '%s' with value %s, but it already contains %s")
% (key, value, current_value))
def update(self, *args, **kwargs):
for key, value in dict(*args, **kwargs).items():
self[key] = value
def __getitem__(self, key):
return self._storage[key]
def __len__(self):
return len(self._storage)
def __repr__(self):
return "Mapping(%s)" % (repr(self._storage),)
def __iter__(self):
return iter(self._storage)
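def _mapping_usage_sketch():
  # Hedged usage sketch (added for illustration; not part of the original
  # module, and never called): Mapping is append-only with string keys, so
  # re-assigning an existing key raises ValueError and a non-string key
  # raises TypeError.
  mapping = Mapping()
  mapping["dense"] = List()
  try:
    mapping["dense"] = List()  # Overwrite attempt: append-only, so raises.
  except ValueError:
    pass
  try:
    mapping[1] = List()  # Non-string keys are rejected by _name_element.
  except TypeError:
    pass
  return mapping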
class _DictWrapper(TrackableDataStructure, wrapt.ObjectProxy):
"""Wraps built-in dicts to support restore-on-create for variables.
_DictWrapper is to Mapping as ListWrapper is to List. Unlike Mapping,
_DictWrapper allows non-string keys and values and arbitrary mutations (delete
keys, reassign values). Like ListWrapper, these mutations mean that
_DictWrapper will raise an exception on save.
"""
def __init__(self, wrapped_dict=None):
if wrapped_dict is None:
# Allow zero-argument construction, e.g. from session.run's re-wrapping.
wrapped_dict = {}
if not isinstance(wrapped_dict, collections.Mapping):
# Allow construction from a sequence, e.g. from nest.pack_sequence_as.
wrapped_dict = dict(wrapped_dict)
wrapt.ObjectProxy.__init__(self, wrapped_dict)
TrackableDataStructure.__init__(self)
self._self_non_string_key = False
self._self_external_modification = False
self.__wrapped__.update(
{key: self._track_value(
value, name=self._name_element(key))
for key, value in self.__wrapped__.items()})
self._update_snapshot()
def __reduce_ex__(self, protocol):
return (self.__class__,
(self.__wrapped__,))
def __getattribute__(self, name):
if (hasattr(type(self), name)
and isinstance(getattr(type(self), name), property)):
# Bypass ObjectProxy for properties. Whether this workaround is necessary
# appears to depend on the Python version but not the wrapt version: 3.4
# in particular seems to look up properties on the wrapped object instead
# of the wrapper without this logic.
return object.__getattribute__(self, name)
else:
return super(_DictWrapper, self).__getattribute__(name)
def copy(self):
return copy.copy(self)
# pylint: disable=protected-access
def __copy__(self):
copied = _DictWrapper(copy.copy(self.__wrapped__))
copied._self_external_modification = self._self_external_modification
copied._self_non_string_key = self._self_non_string_key
return copied
def __deepcopy__(self, memo):
copied = _DictWrapper(copy.deepcopy(self.__wrapped__, memo))
copied._self_external_modification = self._self_external_modification
copied._self_non_string_key = self._self_non_string_key
return copied
# pylint: enable=protected-access
@property
def _values(self):
"""Collect values for TrackableDataStructure."""
# Sort items deterministically by key
ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))
if ordered:
return ordered[1]
return []
@property
def _checkpoint_dependencies(self):
"""Check that the object is saveable before listing its dependencies."""
self._check_self_external_modification()
if self._self_non_string_key:
raise ValueError(
"Unable to save the object %s (a dictionary wrapper constructed "
"automatically on attribute assignment). The wrapped dictionary "
"contains a non-string key which maps to a trackable object or "
"mutable data structure.\n\nIf you don't need this dictionary "
"checkpointed, wrap it in a tf.contrib.checkpoint.NoDependency "
"object; it will be automatically un-wrapped and subsequently "
"ignored." % (self,))
if self._self_external_modification:
raise ValueError(
"Unable to save the object %s (a dictionary wrapper constructed "
"automatically on attribute assignment). The wrapped dictionary was "
"modified outside the wrapper (its final value was %s, its value "
"when a checkpoint dependency was added was %s), which breaks "
"restoration on object creation.\n\nIf you don't need this "
"dictionary checkpointed, wrap it in a "
"tf.contrib.checkpoint.NoDependency object; it will be automatically "
"un-wrapped and subsequently ignored." % (
self, self, self._self_last_wrapped_dict_snapshot))
    assert not self._dirty  # Any cause of dirtiness should have raised above.
return super(_DictWrapper, self)._checkpoint_dependencies
@property
def _dirty(self):
"""Check if there has already been a mutation which prevents saving."""
return (self._self_external_modification
or self._self_non_string_key)
def _check_self_external_modification(self):
"""Checks for any changes to the wrapped dict not through the wrapper."""
if self._dirty:
return
if self != self._self_last_wrapped_dict_snapshot:
self._self_external_modification = True
self._self_last_wrapped_dict_snapshot = None
def _update_snapshot(self):
"""Acknowledges tracked changes to the wrapped dict."""
if self._dirty:
return
self._self_last_wrapped_dict_snapshot = dict(self)
def _track_value(self, value, name):
"""Allows storage of non-trackable objects."""
if isinstance(name, six.string_types):
string_key = True
else:
name = "-non_string_key"
string_key = False
try:
no_dependency = isinstance(value, NoDependency)
value = super(_DictWrapper, self)._track_value(value=value, name=name)
if not (string_key or no_dependency):
# A non-string key maps to a trackable value. This data structure
# is not saveable.
self._self_non_string_key = True
return value
except ValueError:
# Even if this value isn't trackable, we need to make sure
# NoDependency objects get unwrapped.
return sticky_attribute_assignment(
trackable=self, value=value, name=name)
def _name_element(self, key):
"""Tells TrackableDataStructure to use keys as names as-is."""
return key
def __setitem__(self, key, value):
"""Allow any modifications, but possibly mark the wrapper as unsaveable."""
self._check_self_external_modification()
self._maybe_initialize_trackable()
no_dep = isinstance(value, NoDependency)
if isinstance(key, six.string_types):
value = self._track_value(value, name=key)
else:
value = _wrap_or_unwrap(value)
if not no_dep and isinstance(value, base.Trackable):
# Non-string keys are OK as long as we have no reason to add a
# dependency on the value (either because the value is not
# trackable, or because it was wrapped in a NoDependency object).
self._self_non_string_key = True
self.__wrapped__[key] = value
self._update_snapshot()
def __delitem__(self, key):
self._check_self_external_modification()
del self.__wrapped__[key]
self._update_snapshot()
def __repr__(self):
return "DictWrapper(%s)" % (repr(self.__wrapped__),)
def __hash__(self):
raise TypeError("unhashable type: 'DictWrapper'")
def __eq__(self, other):
# Override the TrackableDataStructure "== -> is" forwarding and go back to
# the wrapt implementation.
return self.__wrapped__ == other
def update(self, *args, **kwargs):
for key, value in six.iteritems(dict(*args, **kwargs)):
self[key] = value
def _list_functions_for_serialization(self, unused_serialization_cache):
return {
key: value for key, value in self.items()
if _is_function(value)
}
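def _dict_wrapper_external_modification_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): mutations made through the wrapper keep its snapshot
  # in sync, but edits that bypass the wrapper are detected as external
  # modification, after which listing checkpoint dependencies raises.
  inner = {}
  wrapper = _DictWrapper(inner)
  wrapper["a"] = 1  # Goes through __setitem__, so the snapshot is updated.
  inner["b"] = 2  # Bypasses the wrapper; the snapshot no longer matches.
  try:
    _ = wrapper._checkpoint_dependencies  # pylint: disable=protected-access
  except ValueError:
    pass  # "... modified outside the wrapper ..."
  return wrapper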
def _is_function(x):
return isinstance(x, (def_function.Function, defun.ConcreteFunction))
revived_types.register_revived_type(
"trackable_dict_wrapper",
lambda obj: isinstance(obj, _DictWrapper),
versions=[revived_types.VersionedTypeRegistration(
# Standard dependencies are enough to reconstruct the trackable
# items in dictionaries, so we don't need to save any extra information.
object_factory=lambda proto: _DictWrapper({}),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=operator.setitem)])
def _set_list_item(list_object, index_string, value):
item_index = int(index_string)
if len(list_object) <= item_index:
list_object.extend([None] * (1 + item_index - len(list_object)))
list_object[item_index] = value
revived_types.register_revived_type(
"trackable_list_wrapper",
lambda obj: isinstance(obj, ListWrapper),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda proto: ListWrapper([]),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=_set_list_item)])
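def _set_list_item_padding_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): the revived-type setter receives string indices which
  # may arrive out of order, so _set_list_item pads with None before
  # assigning into the restored ListWrapper.
  restored = ListWrapper([])
  _set_list_item(restored, "2", "c")  # restored == [None, None, "c"]
  _set_list_item(restored, "0", "a")  # restored == ["a", None, "c"]
  return restored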
|
tensorflow-master
|
tensorflow/python/training/tracking/data_structures.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import os
import pickle
import numpy
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import normalization
from tensorflow.python.layers import core as non_keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
class HasList(training.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = data_structures.List([core.Dense(3)])
self.layer_list.append(core.Dense(4))
self.layer_list.extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_list += [
core.Dense(7, bias_regularizer=math_ops.reduce_sum),
core.Dense(8)
]
self.layer_list += (
data_structures.List([core.Dense(9)]) + data_structures.List(
[core.Dense(10)]))
self.layer_list.extend(
data_structures.List(
list([core.Dense(11)]) + [core.Dense(12)]))
self.layers_with_updates = data_structures.List(
(normalization.BatchNormalization(),))
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
class ListTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTracking(self):
model = HasList()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
self.assertEqual(11, len(model.layers))
self.assertEqual(10, len(model.layer_list.layers))
six.assertCountEqual(
self,
model.layers,
model.layer_list.layers + model.layers_with_updates)
for index in range(10):
self.assertEqual(3 + index, model.layer_list.layers[index].units)
self.assertEqual(2, len(model._checkpoint_dependencies))
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertEqual(
10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = [v]
self.assertIn(v, model.variables)
self.assertIn(v, model.trainable_variables)
self.assertNotIn(v, model.non_trainable_variables)
self.assertIn(model.layer_list[0].trainable_weights[0],
model.trainable_weights)
def testSubModelTracking(self):
model = training.Model()
model.v = variables.Variable(1.)
self.assertIn(model.v, model.trainable_weights)
model2 = training.Model()
model2.m = [model]
self.assertIn(model.v, model2.trainable_weights)
def testSubSequentialTracking(self):
class _Subclassed(training.Model):
def __init__(self, wrapped):
super(_Subclassed, self).__init__()
self._wrapped = wrapped
def call(self, x):
return self._wrapped(x)
model = sequential.Sequential()
layer = core.Dense(1)
model.add(layer)
model2 = _Subclassed(model)
model2(array_ops.ones([1, 2]))
model2.m = [model]
self.assertIn(layer.kernel, model2.trainable_weights)
def testLayerTrackedThroughSequential(self):
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def ffnet(layer_sizes, name):
ff = sequential.Sequential(name=name)
for i, width in enumerate(layer_sizes):
ff.add(core.Dense(
width,
activation=("relu" if i < len(layer_sizes)-1 else None)))
return ff
class MyModel2(training.Model):
def __init__(self, config, name="my_model_2"):
super(MyModel2, self).__init__(name=name)
self._num_tokens = config.num_tokens
# list of sub-models
self._ffnet = [ffnet(config.module_layers + (self._num_tokens,), "ff")]
def null_input(self):
return array_ops.zeros([1, self._num_tokens], dtype=dtypes.float32)
def call(self, input_, module_index=None):
return self._ffnet[0](input_)
m2 = MyModel2(AttrDict(
num_tokens=5,
module_layers=(50, 30)))
# Construct
m2(m2.null_input())
self.assertLen(m2.trainable_variables, 6)
def testJSONSerialization(self):
obj = tracking.AutoTrackable()
obj.l = [1]
json.dumps(obj.l, default=serialization.get_json_type)
@test_util.run_v1_only("b/120545219")
def testUpdatesForwarded(self):
with context.graph_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertGreater(len(model.layers_with_updates[0].updates), 0)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
with context.eager_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(0, len(model.updates))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testLossesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(2, len(model.losses))
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = []
self.l2 = []
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1.append(first_layer)
second_layer = HasEqualContainers()
model.l2.append(second_layer)
self.assertEqual([first_layer, second_layer], model.layers)
def testNotTrackable(self):
class NotTrackable(object):
pass
with self.assertRaises(ValueError):
data_structures.List([NotTrackable()])
def testCallNotImplemented(self):
with self.assertRaisesRegexp(TypeError, "not callable"):
data_structures.List()(1.)
def testNoPop(self):
with self.assertRaises(AttributeError):
data_structures.List().pop()
@test_util.run_in_graph_and_eager_modes
def testTensorConversion(self):
class ListToTensor(training.Model):
def __init__(self):
super(ListToTensor, self).__init__()
self.l = [1., 2., 3.]
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(constant_op.constant(ListToTensor().l)))
self.assertAllEqual(
[1., 2., 3.],
        self.evaluate(array_ops.stack(ListToTensor().l)))
def testNesting(self):
with context.graph_mode():
inner = data_structures.List()
outer = data_structures.List([inner])
inner.append(non_keras_core.Dense(1))
inner[0](array_ops.ones([2, 3]))
self.assertEqual(2, len(outer.variables))
self.assertIsInstance(
outer.variables[0],
resource_variable_ops.ResourceVariable)
def testNonLayerVariables(self):
v = resource_variable_ops.ResourceVariable([1.])
l = data_structures.List([v])
self.assertTrue(l.trainable)
self.assertEqual([], l.layers)
self.assertEqual([v], l.variables)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([], l.non_trainable_variables)
l.trainable = False
self.assertEqual([v], l.variables)
self.assertEqual([], l.trainable_variables)
self.assertEqual([v], l.non_trainable_variables)
l.trainable = True
v2 = resource_variable_ops.ResourceVariable(1., trainable=False)
l.append(v2)
self.assertEqual([v, v2], l.weights)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([v2], l.non_trainable_weights)
def testCopy(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
l1 = data_structures.List([v1, v2])
l2 = l1.copy()
l2.append(v3)
self.assertEqual(list(l1), [v1, v2])
self.assertEqual(list(l2), [v1, v2, v3])
def testSlicing(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
v4 = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v1, v2, v3, v4])
self.assertEqual(l[1:], [v2, v3, v4])
self.assertEqual(l[1:-1], [v2, v3])
self.assertEqual(l[:-1], [v1, v2, v3])
def testHash(self):
has_sequences = set([data_structures.List(),
data_structures.List()])
self.assertEqual(2, len(has_sequences))
self.assertNotIn(data_structures.List(), has_sequences)
def testIMul_zero(self):
l = data_structures.List([])
with self.assertRaisesRegexp(ValueError, "List only supports append"):
l *= 0
def testIMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v])
l *= 2
self.assertEqual(list(l), [v] * 2)
def testMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(l * 2), [v, v, v] * 2)
def testRMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(2 * l), [v, v, v] * 2)
class ListWrapperTest(test.TestCase):
IGNORED = ("__new__", "__init__", "__subclasshook__", "__getattribute__")
def test_overrides_all_list_methods(self):
not_overridden = []
for name in dir(list):
if name in ListWrapperTest.IGNORED:
continue
list_method = getattr(list, name)
if not callable(list_method):
continue
object_method = getattr(object, name, None)
if object_method is not None and object_method == list_method:
# Skip methods that aren't overridden from object.
continue
if list_method == getattr(data_structures.ListWrapper, name):
not_overridden.append(name)
if not_overridden:
self.fail("ListWrapper does not override %s" % (not_overridden))
def testPickle(self):
original = data_structures.ListWrapper([1, 2])
serialized = pickle.dumps(original)
del original
deserialized = pickle.loads(serialized)
self.assertEqual([1, 2], deserialized)
def testSameStructure(self):
l = [1]
nest.assert_same_structure(l, data_structures.ListWrapper(copy.copy(l)))
def testFunctionCaching(self):
@def_function.function
def f(list_input):
return list_input[0] + constant_op.constant(1.)
first_trace = f.get_concrete_function([constant_op.constant(2.)])
second_trace = f.get_concrete_function(
data_structures.ListWrapper([constant_op.constant(3.)]))
self.assertIs(first_trace, second_trace)
def testListWrapperBasic(self):
# ListWrapper, unlike List, compares like the built-in list type (since it
# is used to automatically replace lists).
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
self.assertEqual([a, a],
[a, a])
self.assertEqual(data_structures.ListWrapper([a, a]),
data_structures.ListWrapper([a, a]))
self.assertEqual([a, a],
data_structures.ListWrapper([a, a]))
self.assertEqual(data_structures.ListWrapper([a, a]),
[a, a])
self.assertNotEqual([a, a],
[b, a])
self.assertNotEqual(data_structures.ListWrapper([a, a]),
data_structures.ListWrapper([b, a]))
self.assertNotEqual([a, a],
data_structures.ListWrapper([b, a]))
self.assertLess([a], [a, b])
self.assertLess(data_structures.ListWrapper([a]),
data_structures.ListWrapper([a, b]))
self.assertLessEqual([a], [a, b])
self.assertLessEqual(data_structures.ListWrapper([a]),
data_structures.ListWrapper([a, b]))
self.assertGreater([a, b], [a])
self.assertGreater(data_structures.ListWrapper([a, b]),
data_structures.ListWrapper([a]))
self.assertGreaterEqual([a, b], [a])
self.assertGreaterEqual(data_structures.ListWrapper([a, b]),
data_structures.ListWrapper([a]))
self.assertEqual([a], data_structures.ListWrapper([a]))
self.assertEqual([a], list(data_structures.List([a])))
self.assertEqual([a, a], data_structures.ListWrapper([a]) + [a])
self.assertEqual([a, a], [a] + data_structures.ListWrapper([a]))
self.assertIsInstance(data_structures.ListWrapper([a]), list)
def testAcceptsNonTrackableContent(self):
l = data_structures.ListWrapper([1, 2, 3])
self.assertEqual(l, [1, 2, 3])
def testWrapperChangesList(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
l_wrapper.append(1)
self.assertEqual([1], l)
def testListChangesWrapper(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
l.append(1)
self.assertEqual([1], l_wrapper)
def testLayerCollectionWithExternalMutation(self):
l = []
l_wrapper = data_structures.ListWrapper(l)
layer = core.Dense(1)
l.append(layer)
self.assertEqual([layer], l_wrapper.layers)
def testNotHashable(self):
with self.assertRaises(TypeError):
hash(data_structures.ListWrapper())
def testDelItem(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
del l[0]
self.assertEqual(l, [2, 3, 4])
self.assertUnableToSave(l, "Unable to save .*__delitem__")
def testDelSlice(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
del l[2:3]
self.assertEqual(l, [1, 2, 4])
self.assertUnableToSave(l, "Unable to save .*__delslice__")
def testSetSlice_canSaveForNonTrackableItems(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[:] = 2, 8, 9, 0
self.assertEqual(l, [2, 8, 9, 0])
l._maybe_initialize_trackable() # pylint: disable=protected-access
self.assertEqual(len(l._checkpoint_dependencies), 0) # pylint: disable=protected-access
def testSetSlice_cannotSaveIfTrackableModified(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
l = data_structures.ListWrapper([1, 2, v1, v2])
l[:] = 2, 8, 9, v2
self.assertEqual(l, [2, 8, 9, v2])
self.assertUnableToSave(l, "Unable to save .*__setslice__")
def testSetSlice_truncate(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[:] = []
self.assertEqual(l, [])
def testSetSlice_extend(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l[2:] = 1, 2, 3, 4
self.assertEqual(l, [1, 2, 1, 2, 3, 4])
def testIMulNegative(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l *= -1
self.assertEqual(l, [1, 2, 3, 4] * -1)
self.assertUnableToSave(l, "Unable to save")
def testIMulPositive(self):
v = variables.Variable(1.)
l = data_structures.ListWrapper([1, 2, 3, 4, v])
self.assertEqual([("4", v)], l._checkpoint_dependencies)
root = util.Checkpoint(l=l)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
path = root.save(prefix)
v.assign(5.)
l *= 2
self.assertEqual(l, [1, 2, 3, 4, v, 1, 2, 3, 4, v])
self.assertEqual([("4", v), ("9", v)], l._checkpoint_dependencies)
root.restore(path)
self.assertAllClose(1., v.numpy())
def testSort(self):
l = data_structures.ListWrapper([1, 2, 3, 4])
l.sort()
self.assertEqual(l, [1, 2, 3, 4])
# Regardless of being a no-op for the input list, we still refuse to save.
# This is intentional since otherwise we would end up with a hard to debug
# case for users (e.g. sometimes sort on a ListWrapper is trackable and
# other times it is not).
self.assertUnableToSave(l, "Unable to save .*sort")
def assertUnableToSave(self, l, msg):
l._maybe_initialize_trackable() # pylint: disable=protected-access
with self.assertRaisesRegexp(ValueError, msg):
return l._checkpoint_dependencies # pylint: disable=protected-access
class HasMapping(training.Model):
def __init__(self):
super(HasMapping, self).__init__()
self.layer_dict = data_structures.Mapping(output=core.Dense(7))
self.layer_dict["norm"] = data_structures.List()
self.layer_dict["dense"] = data_structures.List()
self.layer_dict["dense"].extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_dict["norm"].append(
normalization.BatchNormalization())
self.layer_dict["norm"].append(
normalization.BatchNormalization())
def call(self, x):
aggregation = 0.
for norm, dense in zip(self.layer_dict["norm"], self.layer_dict["dense"]):
x = norm(dense(x))
aggregation += math_ops.reduce_sum(x)
return self.layer_dict["output"](x) / aggregation
class MappingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTracking(self):
model = HasMapping()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape)
self.assertEqual(5, len(model.layers))
six.assertCountEqual(self, model.layers, model.layer_dict.layers)
self.assertEqual(1, len(model._checkpoint_dependencies))
self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
self.evaluate([v.initializer for v in model.variables])
test_var = model.layer_dict["output"].kernel
self.evaluate(test_var.assign(array_ops.ones([6, 7])))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(test_var.assign(array_ops.zeros([6, 7])))
model.load_weights(save_path)
self.assertAllEqual(numpy.ones([6, 7]),
self.evaluate(test_var))
def testJSONSerialization(self):
obj = tracking.AutoTrackable()
obj.d = {"a": 2}
json.dumps(obj.d, default=serialization.get_json_type)
def testNoOverwrite(self):
mapping = data_structures.Mapping()
original = data_structures.List()
mapping["a"] = original
with self.assertRaises(ValueError):
mapping["a"] = data_structures.List()
self.assertIs(original, mapping["a"])
with self.assertRaises(AttributeError):
del mapping["a"]
mapping.update(b=data_structures.Mapping())
with self.assertRaises(ValueError):
mapping.update({"b": data_structures.Mapping()})
def testNonStringKeys(self):
mapping = data_structures.Mapping()
with self.assertRaises(TypeError):
mapping[1] = data_structures.List()
def testLayerCollectionWithExternalMutation(self):
d = {}
root = tracking.AutoTrackable()
root.wrapper = d
self.assertEqual([], root.wrapper.layers)
self.assertEqual([], root.wrapper.trainable_weights)
layer1 = core.Dense(1)
layer2 = core.Dense(1)
d["a"] = layer1
d["b"] = layer2
self.assertEqual([layer1, layer2], root.wrapper.layers)
# The layers have still not created variables
self.assertEqual([], root.wrapper.trainable_weights)
def testHashing(self):
has_mappings = set([data_structures.Mapping(),
data_structures.Mapping()])
self.assertEqual(2, len(has_mappings))
self.assertNotIn(data_structures.Mapping(), has_mappings)
# In contrast to Mapping, dict wrappers are not hashable
a = tracking.AutoTrackable()
a.d = {}
self.assertEqual({}, a.d)
self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison
self.assertNotEqual({1: 2}, a.d)
with self.assertRaisesRegexp(TypeError, "unhashable"):
set([a.d])
def testDictWrapperBadKeys(self):
a = tracking.AutoTrackable()
a.d = {}
a.d[1] = data_structures.List()
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "non-string key"):
model.save_weights(save_path)
def testDictWrapperNoDependency(self):
a = tracking.AutoTrackable()
a.d = data_structures.NoDependency({})
a.d[1] = [3]
self.assertEqual([a], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonStringKeyNotTrackableValue(self):
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = data_structures.NoDependency([3])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonAppendNotTrackable(self):
# Non-append mutations (deleting or overwriting values) are OK when the
# values aren't tracked.
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = 3
a.d[1] = 2
self.assertEqual(2, a.d[1])
del a.d[1]
a.d[2] = data_structures.NoDependency(tracking.AutoTrackable())
second = tracking.AutoTrackable()
a.d[2] = data_structures.NoDependency(second)
self.assertIs(second, a.d[2])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testPopNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
model.d.pop("a")
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "Unable to save"):
model.save_weights(save_path)
def testExternalModificationNoSave(self):
model = training.Model()
external_reference = {}
model.d = external_reference
external_reference["a"] = []
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
model.save_weights(save_path)
def testOverwriteCanStillSave(self):
model = training.Model()
model.d = {}
model.d["a"] = {}
model.d["a"] = {}
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
def testIter(self):
model = training.Model()
model.d = {1: 3}
model.d[1] = 3
self.assertEqual([1], list(model.d))
new_dict = {}
# This update() is super tricky. If the dict wrapper subclasses dict,
# CPython will access its storage directly instead of calling any
# methods/properties on the object. So the options are either not to
# subclass dict (in which case update will call normal iter methods, but the
# object won't pass isinstance checks) or to subclass dict and keep that
    # storage updated (without shadowing all its methods like ListWrapper does).
new_dict.update(model.d)
self.assertEqual({1: 3}, new_dict)
def testListShallowCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.copy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIs(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testListDeepCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.deepcopy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testDictShallowCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.copy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
copied = root.a.copy()
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testDictDeepCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.deepcopy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testShallowCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
shallow_copied = copy.copy(original)
self.assertIs(original_sub, shallow_copied.b["a"])
self.assertIsNot(original, shallow_copied)
self.assertEqual([[1.]], shallow_copied.a)
shallow_deps = util.list_objects(shallow_copied)
self.assertIn(shallow_copied.a, shallow_deps)
self.assertIn(shallow_copied.b, shallow_deps)
self.assertIn(shallow_copied.b["a"], shallow_deps)
def testDeepCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
self.assertIsInstance(original.b, dict)
deep_copied = copy.deepcopy(original)
self.assertIsInstance(deep_copied.b, dict)
self.assertIsNot(original, deep_copied)
self.assertIsNot(original_sub, deep_copied.b["a"])
self.assertEqual([[1.]], deep_copied.a)
self.assertIsInstance(deep_copied.b["a"], tracking.AutoTrackable)
deps = util.list_objects(deep_copied)
self.assertIn(deep_copied.a, deps)
self.assertIn(deep_copied.b, deps)
self.assertIn(deep_copied.b["a"], deps)
self.assertNotIn(original_sub, deps)
def testConstructableFromSequence(self):
result = data_structures._DictWrapper([(1, 2), (3, 4)])
self.assertIsInstance(result, dict)
self.assertEqual({1: 2, 3: 4}, result)
def testPickle(self):
original = data_structures._DictWrapper(dict(a=1, b=2))
serialized = pickle.dumps(original)
del original
deserialized = pickle.loads(serialized)
self.assertEqual(dict(a=1, b=2), deserialized)
def testListAddOrder(self):
self.assertEqual([1., 2.],
data_structures.ListWrapper([1.])
+ data_structures.ListWrapper([2.]))
self.assertEqual([1., 2.],
data_structures.ListWrapper([1.])
+ [2.])
self.assertEqual([1., 2.],
[1.]
+ data_structures.ListWrapper([2.]))
def testSameStructure(self):
d = {1: "a"}
nest.assert_same_structure(d, data_structures._DictWrapper(d.copy()))
def testFunctionCaching(self):
@def_function.function
def f(dict_input):
return dict_input["x"] + constant_op.constant(1.)
first_trace = f.get_concrete_function({"x": constant_op.constant(2.)})
second_trace = f.get_concrete_function(
data_structures._DictWrapper({"x": constant_op.constant(3.)}))
self.assertIs(first_trace, second_trace)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/data_structures_test.py
|
"""Utilities for collecting objects based on "is" comparison."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import weakref
class _ObjectIdentityWrapper(object):
"""Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
Since __eq__ is based on object identity, it's safe to also define __hash__
based on object ids. This lets us add unhashable types like trackable
_ListWrapper objects to object-identity collections.
"""
def __init__(self, wrapped):
self._wrapped = wrapped
@property
def unwrapped(self):
return self._wrapped
def __eq__(self, other):
if isinstance(other, _ObjectIdentityWrapper):
return self._wrapped is other._wrapped # pylint: disable=protected-access
return self._wrapped is other
def __hash__(self):
# Wrapper id() is also fine for weakrefs. In fact, we rely on
# id(weakref.ref(a)) == id(weakref.ref(a)) and weakref.ref(a) is
# weakref.ref(a) in _WeakObjectIdentityWrapper.
return id(self._wrapped)
class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
def __init__(self, wrapped):
super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))
@property
def unwrapped(self):
return self._wrapped()
class ObjectIdentityDictionary(collections.MutableMapping):
"""A mutable mapping data structure which compares using "is".
This is necessary because we have trackable objects (_ListWrapper) which
have behavior identical to built-in Python lists (including being unhashable
and comparing based on the equality of their contents by default).
"""
def __init__(self):
self._storage = {}
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __getitem__(self, key):
return self._storage[self._wrap_key(key)]
def __setitem__(self, key, value):
self._storage[self._wrap_key(key)] = value
def __delitem__(self, key):
del self._storage[self._wrap_key(key)]
def __len__(self):
return len(self._storage)
def __iter__(self):
for key in self._storage:
yield key.unwrapped
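def _object_identity_dictionary_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): two distinct empty lists compare equal and are
  # unhashable, yet ObjectIdentityDictionary keys on each separately because
  # it hashes and compares wrapped keys by id().
  first, second = [], []
  by_identity = ObjectIdentityDictionary()
  by_identity[first] = "first"
  by_identity[second] = "second"
  assert by_identity[first] == "first"
  assert by_identity[second] == "second"
  return by_identity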
class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
"""Like weakref.WeakKeyDictionary, but compares objects with "is"."""
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len(list(self._storage))
  def __iter__(self):
    # Materialize the keys so dead entries can be dropped while iterating
    # (mutating a dict during .keys() iteration raises in Python 3).
    keys = list(self._storage)
    for key in keys:
      unwrapped = key.unwrapped
      if unwrapped is None:
        # `key` is already a wrapper; delete it from storage directly, since
        # wrapping it again would produce a key that never matches.
        del self._storage[key]
else:
yield unwrapped
class ObjectIdentitySet(collections.MutableSet):
"""Like the built-in set, but compares objects with "is"."""
def __init__(self, *args):
self._storage = set([self._wrap_key(obj) for obj in list(*args)])
def _wrap_key(self, key):
return _ObjectIdentityWrapper(key)
def __contains__(self, key):
return self._wrap_key(key) in self._storage
def discard(self, key):
self._storage.discard(self._wrap_key(key))
def add(self, key):
self._storage.add(self._wrap_key(key))
def __len__(self):
return len(self._storage)
def __iter__(self):
keys = list(self._storage)
for key in keys:
yield key.unwrapped
class ObjectIdentityWeakSet(ObjectIdentitySet):
"""Like weakref.WeakSet, but compares objects with "is"."""
def _wrap_key(self, key):
return _WeakObjectIdentityWrapper(key)
def __len__(self):
# Iterate, discarding old weak refs
return len([_ for _ in self])
def __iter__(self):
keys = list(self._storage)
for key in keys:
unwrapped = key.unwrapped
if unwrapped is None:
        # `key` is already a wrapper; discard it from storage directly so the
        # dead entry is actually removed (re-wrapping would never match).
        self._storage.discard(key)
else:
yield unwrapped
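def _weak_set_collection_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): once the last strong reference to an element dies,
  # iteration skips the dead weakref, so the set reads as empty. The timing
  # assumes CPython's immediate reference counting; other interpreters may
  # defer collection.
  class _Probe(object):
    pass
  probes = ObjectIdentityWeakSet()
  probe = _Probe()
  probes.add(probe)
  assert len(probes) == 1
  del probe  # Drops the only strong reference.
  assert len(probes) == 0
  return probes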
|
tensorflow-master
|
tensorflow/python/training/tracking/object_identity.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for checkpoint-related APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import util
class _TrivialSaveable(saveable_object.SaveableObject):
def __init__(self, name):
op = lambda: array_ops.ones([])
super(_TrivialSaveable, self).__init__(
op=op,
specs=[saveable_object.SaveSpec(
op, "", name, dtype=dtypes.float32, device="CPU:0")],
name=name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _TrivialRestore(base.Trackable):
def _gather_saveables_for_checkpoint(self):
return {base.VARIABLE_VALUE_KEY: _TrivialSaveable}
class _LazyTrivialObjects(module.Module):
def __init__(self):
self.existing = [_TrivialRestore() for _ in range(5)]
self.lazy = []
def __call__(self):
if not self.lazy:
self.lazy.extend(_TrivialRestore() for _ in range(5))
return
def _save_checkpoint():
original_checkpoint = util.Checkpoint(m=_LazyTrivialObjects())
original_checkpoint.m()
return original_checkpoint.write(os.path.join(test.get_temp_dir(), "ckpt"))
class SavingBenchmarks(test.Benchmark):
def _run(self, func, num_iters, execution_mode=None):
func()
start = time.time()
    for _ in range(num_iters):
func()
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_baseline_no_restore(self):
def _create_and_call():
checkpoint = util.Checkpoint(m=_LazyTrivialObjects())
checkpoint.m()
self._run(_create_and_call, 3)
def benchmark_batch_restore(self):
checkpoint_path = _save_checkpoint()
def _create_and_call():
checkpoint = util.Checkpoint(m=_LazyTrivialObjects())
checkpoint.m()
checkpoint.restore(checkpoint_path)
self._run(_create_and_call, 3)
def benchmark_restore_on_create(self):
checkpoint_path = _save_checkpoint()
def _create_and_call():
checkpoint = util.Checkpoint(m=_LazyTrivialObjects())
checkpoint.restore(checkpoint_path)
checkpoint.m()
self._run(_create_and_call, 3)
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/benchmarks_test.py
|
"""Dependency tracking for trackable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import tf_contextlib
# global _RESOURCE_TRACKER_STACK
_RESOURCE_TRACKER_STACK = []
class NotTrackable(object):
"""Marks instances of child classes as unsaveable using an object-based API.
Useful for marking objects which would otherwise look trackable because
of inheritance (e.g. through `Layer`) as not trackable. Inheriting from
`NotTrackable` does not prevent an object from being assigned to any
attributes, but will throw an error on save/restore.
"""
pass
class AutoTrackable(base.Trackable):
"""Manages dependencies on other objects.
`Trackable` objects may have dependencies: other `Trackable` objects
which should be saved if the object declaring the dependency is saved. A
correctly saveable program has a dependency graph such that if changing a
global variable affects an object (e.g. changes the behavior of any of its
methods) then there is a chain of dependencies from the influenced object to
the variable.
Dependency edges have names, and are created implicitly when a
`Trackable` object is assigned to an attribute of another
`Trackable` object. For example:
```
obj = Trackable()
obj.v = ResourceVariable(0.)
```
The `Trackable` object `obj` now has a dependency named "v" on a
variable.
`Trackable` objects may specify `Tensor`s to be saved and restored
directly (e.g. a `Variable` indicating how to save itself) rather than through
dependencies on other objects. See
`Trackable._gather_saveables_for_checkpoint` for details.
"""
def __setattr__(self, name, value):
"""Support self.foo = trackable syntax."""
try:
if getattr(self, name) is value:
# Short circuit for `self.$x = self.$x`.
return
except AttributeError:
pass
if getattr(self, "_self_setattr_tracking", True):
value = data_structures.sticky_attribute_assignment(
trackable=self, value=value, name=name)
super(AutoTrackable, self).__setattr__(name, value)
def __delattr__(self, name):
self._maybe_initialize_trackable()
delete_tracking(self, name)
super(AutoTrackable, self).__delattr__(name)
def _no_dependency(self, value):
"""Override to allow TrackableBase to disable dependency tracking."""
return data_structures.NoDependency(value)
def _list_functions_for_serialization(self, unused_serialization_cache):
"""Return a dict of `Function`s of a trackable."""
functions = {}
for attribute_name in dir(self):
try:
attribute_value = getattr(self, attribute_name, None)
except Exception: # pylint: disable=broad-except
# We really don't want to throw an exception just because some object's
# attribute accessor is broken.
attribute_value = None
if isinstance(attribute_value, (def_function.Function,
defun.ConcreteFunction)):
functions[attribute_name] = attribute_value
return functions
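def _dependency_edge_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): attribute assignment creates a named dependency edge,
  # while NoDependency opts a value out of tracking. Reads a private
  # attribute purely for demonstration.
  root = AutoTrackable()
  root.child = AutoTrackable()  # Creates the dependency edge named "child".
  root.loose = data_structures.NoDependency(AutoTrackable())  # Untracked.
  names = [dep.name for dep in root._checkpoint_dependencies]  # pylint: disable=protected-access
  assert names == ["child"]
  return root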
def delete_tracking(obj, name):
"""Removes the tracking of name from object."""
# pylint: disable=protected-access
if name in obj._unconditional_dependency_names:
del obj._unconditional_dependency_names[name]
for index, (dep_name, _) in enumerate(
obj._unconditional_checkpoint_dependencies):
if dep_name == name:
del obj._unconditional_checkpoint_dependencies[index]
break
# pylint: enable=protected-access
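def _delete_tracking_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): deleting an attribute through AutoTrackable.__delattr__
  # also removes the corresponding dependency edge.
  root = AutoTrackable()
  root.child = AutoTrackable()
  del root.child
  assert not root._checkpoint_dependencies  # pylint: disable=protected-access
  return root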
class ResourceTracker(object):
"""An object that tracks a list of resources."""
def __init__(self):
self._resources = []
@property
def resources(self):
return self._resources
def add_resource(self, resource):
self._resources.append(resource)
@tf_contextlib.contextmanager
def resource_tracker_scope(resource_tracker):
"""A context to manage resource trackers.
Use this in order to collect up all resources created within a block of code.
Example usage:
```python
resource_tracker = ResourceTracker()
with resource_tracker_scope(resource_tracker):
resource = TrackableResource()
  assert resource_tracker.resources == [resource]
  ```
  Args:
resource_tracker: The passed in ResourceTracker object
Yields:
A scope in which the resource_tracker is active.
"""
global _RESOURCE_TRACKER_STACK
old = list(_RESOURCE_TRACKER_STACK)
_RESOURCE_TRACKER_STACK.append(resource_tracker)
try:
yield
finally:
_RESOURCE_TRACKER_STACK = old
class CapturableResource(base.Trackable):
"""Holds a Tensor which a tf.function can capture.
`CapturableResource`s are discovered by traversing the graph of object
attributes, e.g. during `tf.saved_model.save`. They are excluded from the
scope-based tracking of `TrackableResource`; generally things that require
initialization should inherit from `TrackableResource` instead of
`CapturableResource` directly.
"""
def __init__(self, device=""):
"""Initialize the `CapturableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
"""
self._resource_handle = None
self._resource_device = device
def _create_resource(self):
"""A function that creates a resource handle."""
raise NotImplementedError("TrackableResource._create_resource not "
"implemented.")
def _initialize(self):
"""A function that initializes the resource. Optional."""
pass
@property
def resource_handle(self):
"""Returns the resource handle associated with this Resource."""
if self._resource_handle is None:
with ops.device(self._resource_device):
self._resource_handle = self._create_resource()
return self._resource_handle
def _list_functions_for_serialization(self, unused_functions):
@def_function.function(input_signature=[], autograph=False)
def _creator():
resource = self._create_resource()
return resource
@def_function.function(input_signature=[], autograph=False)
def _initializer():
self._initialize()
return 1 # Dummy return
return {
"_create_resource": _creator,
"_initialize": _initializer,
}
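class _ConstantResourceSketch(CapturableResource):
  """Hedged sketch (added for illustration; not part of the original module).
  Subclasses only need to override `_create_resource`; `resource_handle`
  then creates the handle lazily, exactly once, on the configured device.
  """
  def _create_resource(self):
    # Any tensor a tf.function can capture works for demonstration purposes.
    return ops.convert_to_tensor(1.0)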
class TrackableResource(CapturableResource):
"""Adds scope tracking to CapturableResource."""
def __init__(self, device=""):
"""Initialize the `TrackableResource`.
Args:
device: A string indicating a required placement for this resource,
e.g. "CPU" if this resource must be created on a CPU device. A blank
device allows the user to place resource creation, so generally this
should be blank unless the resource only makes sense on one device.
"""
global _RESOURCE_TRACKER_STACK
for resource_tracker in _RESOURCE_TRACKER_STACK:
resource_tracker.add_resource(self)
super(TrackableResource, self).__init__(device=device)
class TrackableAsset(base.Trackable):
"""Base class for asset files which need to be tracked."""
def __init__(self, path):
"""Record the full path to the asset."""
# The init_scope prevents functions from capturing `path` in an
# initialization graph, since it is transient and should not end up in a
# serialized function body.
with ops.init_scope(), ops.device("CPU"):
self._path = ops.internal_convert_to_tensor(path, dtype=dtypes.string,
name="asset_path")
@property
def asset_path(self):
"""Fetch the current asset path."""
return self._path
def cached_per_instance(f):
"""Lightweight decorator for caching lazily constructed properties.
When to use:
This decorator provides simple caching with minimal overhead. It is designed
for properties which are expensive to compute and static over the life of a
class instance, and provides no mechanism for cache invalidation. Thus it is
best suited for lazily exposing derived properties of other static data.
For classes with custom getattr / setattr behavior (such as trackable
objects), storing cache results as object attributes is not performant.
Instead, a specialized cache can significantly reduce property lookup
overhead. (While still allowing the decorated property to be lazily computed.)
Consider the following class:
```
class MyClass(object):
def __setattr__(self, key, value):
# Some expensive class specific code
# ...
# ...
super(MyClass, self).__setattr__(key, value)
@property
def thing(self):
# `thing` is expensive to compute (and may not even be requested), so we
# want to lazily compute it and then cache it.
output = getattr(self, '_thing', None)
if output is None:
self._thing = output = compute_thing(self)
return output
```
It's also worth noting that ANY overriding of __setattr__, even something as
simple as:
```
def __setattr__(self, key, value):
super(MyClass, self).__setattr__(key, value)
```
  slows down attribute assignment by nearly 10x.
By contrast, replacing the definition of `thing` with the following sidesteps
the expensive __setattr__ altogether:
  ```
@property
@tracking.cached_per_instance
def thing(self):
# `thing` is expensive to compute (and may not even be requested), so we
# want to lazily compute it and then cache it.
return compute_thing(self)
  ```
Performance:
The overhead for this decorator is ~0.4 us / call. A much lower overhead
implementation (~0.085 us / call) can be achieved by using a custom dict type:
```
def dict_based_cache(f):
class Cache(dict):
__slots__ = ()
def __missing__(self, key):
self[key] = output = f(key)
return output
return property(Cache().__getitem__)
```
However, that implementation holds class instances as keys, and as a result
  blocks garbage collection. (And modifying it to use weakrefs as keys raises
the lookup overhead to ~0.4 us) As a result, the WeakKeyDictionary
implementation below turns out to be more prudent.
Args:
f: The function to cache.
Returns:
f decorated with simple caching behavior.
"""
cache = weakref.WeakKeyDictionary()
@functools.wraps(f)
def wrapped(item):
output = cache.get(item)
if output is None:
cache[item] = output = f(item)
return output
return wrapped
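def _cached_per_instance_sketch():
  # Hedged sketch (added for illustration; not part of the original module,
  # and never called): the decorated function runs once per instance; later
  # lookups are served from the WeakKeyDictionary cache.
  calls = []
  class _Expensive(object):
    @property
    @cached_per_instance
    def thing(self):
      calls.append(self)
      return 42
  obj = _Expensive()
  assert obj.thing == 42
  assert obj.thing == 42  # Second access hits the cache.
  assert len(calls) == 1
  return obj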
ops.register_tensor_conversion_function(
TrackableAsset,
lambda asset, **kw: ops.internal_convert_to_tensor(asset.asset_path, **kw))
|
tensorflow-master
|
tensorflow/python/training/tracking/tracking.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import multiprocessing.dummy
import os
import pickle
import time
import timeit
import numpy as np
import six
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import base
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import nest
_PICKLEABLE_CALL_COUNT = collections.Counter()
class MyPickleableObject(tracking.AutoTrackable):
"""Needed for InterfaceTests.test_property_cache_serialization.
This class must be at the top level. This is a constraint of pickle,
unrelated to `cached_per_instance`.
"""
@property
@tracking.cached_per_instance
def my_id(self):
_PICKLEABLE_CALL_COUNT[self] += 1
return id(self)
class InterfaceTests(test.TestCase):
def testMultipleAssignment(self):
root = tracking.AutoTrackable()
root.leaf = tracking.AutoTrackable()
root.leaf = root.leaf
duplicate_name_dep = tracking.AutoTrackable()
with self.assertRaisesRegexp(ValueError, "already declared"):
root._track_trackable(duplicate_name_dep, name="leaf")
# No error; we're overriding __setattr__, so we can't really stop people
# from doing this while maintaining backward compatibility.
root.leaf = duplicate_name_dep
root._track_trackable(duplicate_name_dep, name="leaf", overwrite=True)
self.assertIs(duplicate_name_dep, root._lookup_dependency("leaf"))
(_, dep_object), = root._checkpoint_dependencies
self.assertIs(duplicate_name_dep, dep_object)
def testNoDependency(self):
root = tracking.AutoTrackable()
hasdep = tracking.AutoTrackable()
root.hasdep = hasdep
nodep = tracking.AutoTrackable()
root.nodep = data_structures.NoDependency(nodep)
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertIs(root._checkpoint_dependencies[0].ref, root.hasdep)
self.assertIs(root.hasdep, hasdep)
self.assertIs(root.nodep, nodep)
class NoDependencyModel(training.Model):
@base.no_automatic_dependency_tracking
def __init__(self):
super(NoDependencyModel, self).__init__()
self.a = []
self.b = tracking.AutoTrackable()
nodeps = NoDependencyModel()
self.assertEqual([nodeps], util.list_objects(nodeps))
def testRemoveDependency(self):
root = tracking.AutoTrackable()
root.a = tracking.AutoTrackable()
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertEqual(1, len(root._unconditional_checkpoint_dependencies))
self.assertIs(root.a, root._checkpoint_dependencies[0].ref)
del root.a
self.assertFalse(hasattr(root, "a"))
self.assertEqual(0, len(root._checkpoint_dependencies))
self.assertEqual(0, len(root._unconditional_checkpoint_dependencies))
root.a = tracking.AutoTrackable()
self.assertEqual(1, len(root._checkpoint_dependencies))
self.assertEqual(1, len(root._unconditional_checkpoint_dependencies))
self.assertIs(root.a, root._checkpoint_dependencies[0].ref)
def testListBasic(self):
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
a.l = [b]
c = tracking.AutoTrackable()
a.l.append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
direct_a_dep, = a._checkpoint_dependencies
self.assertEqual("l", direct_a_dep.name)
self.assertIn(b, direct_a_dep.ref)
self.assertIn(c, direct_a_dep.ref)
@test_util.run_in_graph_and_eager_modes
def testMutationDirtiesList(self):
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
a.l = [b]
c = tracking.AutoTrackable()
a.l.insert(0, c)
checkpoint = util.Checkpoint(a=a)
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testOutOfBandEditDirtiesList(self):
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
held_reference = [b]
a.l = held_reference
c = tracking.AutoTrackable()
held_reference.append(c)
checkpoint = util.Checkpoint(a=a)
with self.assertRaisesRegexp(ValueError, "The wrapped list was modified"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testNestedLists(self):
a = tracking.AutoTrackable()
a.l = []
b = tracking.AutoTrackable()
a.l.append([b])
c = tracking.AutoTrackable()
a.l[0].append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
a.l[0].append(1)
d = tracking.AutoTrackable()
a.l[0].append(d)
a_deps = util.list_objects(a)
self.assertIn(d, a_deps)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
self.assertNotIn(1, a_deps)
e = tracking.AutoTrackable()
f = tracking.AutoTrackable()
a.l1 = [[], [e]]
a.l1[0].append(f)
a_deps = util.list_objects(a)
self.assertIn(e, a_deps)
self.assertIn(f, a_deps)
checkpoint = util.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l[0].append(data_structures.NoDependency([]))
a.l[0][-1].append(5)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
# Dirtying the inner list means the root object is unsaveable.
a.l[0][1] = 2
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testDictionariesBasic(self):
a = training.Model()
b = training.Model()
a.attribute = {"b": b}
c = training.Model()
a.attribute["c"] = []
a.attribute["c"].append(c)
a_deps = util.list_objects(a)
self.assertIn(b, a_deps)
self.assertIn(c, a_deps)
self.assertIs(b, a.attribute["b"])
six.assertCountEqual(
self,
["b", "c"],
[dep.name for dep in a.attribute._checkpoint_dependencies])
self.assertEqual([b, c], a.layers)
self.assertEqual([b, c], a.attribute.layers)
self.assertEqual([c], a.attribute["c"].layers)
checkpoint = util.Checkpoint(a=a)
save_path = checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
with self.cached_session():
checkpoint.restore(save_path).assert_consumed().initialize_or_restore()
@test_util.run_in_graph_and_eager_modes
def testNoDepList(self):
a = training.Model()
a.l1 = data_structures.NoDependency([])
a.l1.insert(1, 0)
self.assertTrue(isinstance(a.l1, list))
checkpoint = util.Checkpoint(a=a)
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
a.l2 = []
a.l2.insert(1, 0)
with self.assertRaisesRegexp(ValueError, "A list element was replaced"):
checkpoint.save(os.path.join(self.get_temp_dir(), "ckpt"))
@test_util.run_in_graph_and_eager_modes
def testAssertions(self):
a = tracking.AutoTrackable()
a.l = {"k": [np.zeros([2, 2])]}
self.assertAllEqual(nest.flatten({"k": [np.zeros([2, 2])]}),
nest.flatten(a.l))
self.assertAllClose({"k": [np.zeros([2, 2])]}, a.l)
nest.map_structure(self.assertAllClose, a.l, {"k": [np.zeros([2, 2])]})
a.tensors = {"k": [array_ops.ones([2, 2]), array_ops.zeros([3, 3])]}
self.assertAllClose({"k": [np.ones([2, 2]), np.zeros([3, 3])]},
self.evaluate(a.tensors))
def test_property_cache(self):
test_counter = collections.Counter()
class MyObject(tracking.AutoTrackable):
def __init__(self):
super(MyObject, self).__init__()
self._frozen = True
def __setattr__(self, key, value):
"""Enforce that cache does not set attribute on MyObject."""
if getattr(self, "_frozen", False):
raise ValueError("Cannot mutate when frozen.")
return super(MyObject, self).__setattr__(key, value)
@property
@tracking.cached_per_instance
def test_property(self):
test_counter[id(self)] += 1
return id(self)
first_object = MyObject()
second_object = MyObject()
# Make sure the objects return the correct values
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
# Make sure the cache does not share across objects
self.assertNotEqual(first_object.test_property, second_object.test_property)
# Check again (Now the values should be cached.)
self.assertEqual(first_object.test_property, id(first_object))
self.assertEqual(second_object.test_property, id(second_object))
# Count the function calls to make sure the cache is actually being used.
self.assertAllEqual(tuple(test_counter.values()), (1, 1))
def test_property_cache_threaded(self):
call_count = collections.Counter()
class MyObject(tracking.AutoTrackable):
@property
@tracking.cached_per_instance
def test_property(self):
# Random sleeps to ensure that the execution thread changes
# mid-computation.
call_count["test_property"] += 1
time.sleep(np.random.random() + 1.)
# Use a RandomState which is seeded off the instance's id (the mod is
# because numpy limits the range of seeds) to ensure that an instance
# returns the same value in different threads, but different instances
# return different values.
return int(np.random.RandomState(id(self) % (2 ** 31)).randint(2 ** 16))
def get_test_property(self, _):
"""Function provided to .map for threading test."""
return self.test_property
# Test that multiple threads return the same value. This requires that
# the underlying function is repeatable, as cached_property makes no attempt
# to prioritize the first call.
test_obj = MyObject()
with contextlib.closing(multiprocessing.dummy.Pool(32)) as pool:
# Intentionally make a large pool (even when there are only a small number
      # of CPUs) to ensure that the runtime switches threads.
results = pool.map(test_obj.get_test_property, range(64))
self.assertEqual(len(set(results)), 1)
# Make sure we actually are testing threaded behavior.
self.assertGreater(call_count["test_property"], 1)
# Make sure new threads still cache hit.
with contextlib.closing(multiprocessing.dummy.Pool(2)) as pool:
start_time = timeit.default_timer() # Don't time pool instantiation.
results = pool.map(test_obj.get_test_property, range(4))
total_time = timeit.default_timer() - start_time
# Note(taylorrobie): The reason that it is safe to time a unit test is that
# a cache hit will be << 1 second, and a cache miss is
    #                    guaranteed to be >= 1 second. Empirically confirmed by
# 100,000 runs with no flakes.
self.assertLess(total_time, 0.95)
def test_property_cache_serialization(self):
# Reset call count. .keys() must be wrapped in a list, because otherwise we
# would mutate the iterator while iterating.
for k in list(_PICKLEABLE_CALL_COUNT.keys()):
_PICKLEABLE_CALL_COUNT.pop(k)
first_instance = MyPickleableObject()
self.assertEqual(id(first_instance), first_instance.my_id)
# Test that we can pickle and un-pickle
second_instance = pickle.loads(pickle.dumps(first_instance))
self.assertEqual(id(second_instance), second_instance.my_id)
self.assertNotEqual(first_instance.my_id, second_instance.my_id)
# Make sure de-serialized object uses the cache.
self.assertEqual(_PICKLEABLE_CALL_COUNT[second_instance], 1)
# Make sure the decorator cache is not being serialized with the object.
expected_size = len(pickle.dumps(second_instance))
for _ in range(5):
# Add some more entries to the cache.
_ = MyPickleableObject().my_id
self.assertEqual(len(_PICKLEABLE_CALL_COUNT), 7)
size_check_instance = MyPickleableObject()
_ = size_check_instance.my_id
self.assertEqual(expected_size, len(pickle.dumps(size_check_instance)))
class _DummyResource(tracking.TrackableResource):
def __init__(self, handle_name):
self._handle_name = handle_name
super(_DummyResource, self).__init__()
def _create_resource(self):
return self._handle_name
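# Illustrative sketch (for exposition only): `resource_tracker_scope` makes
# the given tracker record every `TrackableResource` constructed while the
# scope is active, which is the behavior the tests below exercise.
def _resource_tracker_sketch():
  tracker = tracking.ResourceTracker()
  with tracking.resource_tracker_scope(tracker):
    _DummyResource("example")  # Recorded by the active tracker.
  assert len(tracker.resources) == 1
  assert tracker.resources[0].resource_handle == "example"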
class ResourceTrackerTest(test.TestCase):
def testBasic(self):
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
dummy_resource1 = _DummyResource("test1")
dummy_resource2 = _DummyResource("test2")
self.assertEqual(2, len(resource_tracker.resources))
self.assertEqual("test1", resource_tracker.resources[0].resource_handle)
self.assertEqual("test2", resource_tracker.resources[1].resource_handle)
def testTwoScopes(self):
resource_tracker1 = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker1):
dummy_resource1 = _DummyResource("test1")
resource_tracker2 = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker2):
dummy_resource2 = _DummyResource("test2")
self.assertEqual(1, len(resource_tracker1.resources))
self.assertEqual("test1", resource_tracker1.resources[0].resource_handle)
    self.assertEqual(1, len(resource_tracker2.resources))
self.assertEqual("test2", resource_tracker2.resources[0].resource_handle)
  def testNestedScopes(self):
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
resource_tracker1 = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker1):
dummy_resource1 = _DummyResource("test1")
resource_tracker2 = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker2):
dummy_resource2 = _DummyResource("test2")
self.assertEqual(1, len(resource_tracker1.resources))
self.assertEqual("test1", resource_tracker1.resources[0].resource_handle)
    self.assertEqual(1, len(resource_tracker2.resources))
self.assertEqual("test2", resource_tracker2.resources[0].resource_handle)
self.assertEqual(2, len(resource_tracker.resources))
self.assertEqual("test1", resource_tracker.resources[0].resource_handle)
self.assertEqual("test2", resource_tracker.resources[1].resource_handle)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/training/tracking/tracking_test.py
|
"""An object-local variable management scheme."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
# Key where the object graph proto is saved in a TensorBundle
OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# A key indicating a variable's value in an object's checkpointed Tensors
# (Trackable._gather_saveables_for_checkpoint). If this is the only key and
# the object has no dependencies, then its value may be restored on object
# creation (avoiding double assignment when executing eagerly).
VARIABLE_VALUE_KEY = "VARIABLE_VALUE"
OBJECT_CONFIG_JSON_KEY = "OBJECT_CONFIG_JSON"
TrackableReference = collections.namedtuple(
"TrackableReference",
[
# The local name for this dependency.
"name",
# The Trackable object being referenced.
"ref"
])
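# Illustrative sketch (for exposition only): a `TrackableReference` simply
# pairs a local dependency name with the referenced object; the dependency
# lists used throughout this module are lists of these tuples.
def _trackable_reference_sketch(child):
  # `child` is assumed to be a `Trackable` instance.
  ref = TrackableReference(name="child", ref=child)
  return ref.name, ref.ref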
class CheckpointInitialValue(ops.Tensor):
"""Tensor wrapper for managing update UIDs in `Variables`.
When supplied as an initial value, objects of this type let a `Variable`
(`Variable`, `ResourceVariable`, etc.) know the UID of the restore the initial
value came from. This allows deferred restorations to be sequenced in the
order the user specified them, and lets us fall back on assignment if an
initial value is not set (e.g. due to a custom getter interfering).
See comments in _add_variable_with_custom_getter for more information about
how `CheckpointInitialValue` is used.
"""
def __init__(self, checkpoint_position, shape=None):
self.wrapped_value = checkpoint_position.value_tensors()[VARIABLE_VALUE_KEY]
if shape:
# We need to set the static shape information on the initializer if
# possible so we don't get a variable with an unknown shape.
self.wrapped_value.set_shape(shape)
self._checkpoint_position = checkpoint_position
def __getattr__(self, attr):
try:
return getattr(self.wrapped_value, attr)
except AttributeError:
return self.__getattribute__(attr)
@property
def checkpoint_position(self):
return self._checkpoint_position
class NoRestoreSaveable(saveable_object.SaveableObject):
"""Embeds a tensor in a checkpoint with no restore ops."""
def __init__(self, tensor, name, dtype=None, device=None):
spec = saveable_object.SaveSpec(
tensor, "", name, dtype=dtype, device=device)
super(NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
@six.add_metaclass(abc.ABCMeta)
class PythonStateSaveable(saveable_object.SaveableObject):
"""An interface for saving/restoring volatile Python state."""
@abc.abstractmethod
def feed_dict_additions(self):
"""When running a graph, indicates fresh state to feed.
Returns:
A dictionary mapping `Tensor`s to current Python state.
"""
pass
@abc.abstractmethod
def freeze(self):
"""Create a new `SaveableObject` which freezes current state as a constant.
Used when executing eagerly to embed the current state as a constant, or
when creating a static tf.compat.v1.train.Saver with the frozen current
Python state.
Returns:
A `SaveableObject` which is not a `PythonStateSaveable` instance (i.e. has
no Python state associated with it).
"""
pass
class PythonStringStateSaveable(PythonStateSaveable):
"""Saves Python state in a checkpoint."""
def __init__(self, name, state_callback, restore_callback=None):
"""Configure saving.
Args:
name: The checkpoint key to write to.
state_callback: A function taking no arguments which returns a string.
This function is run every time a checkpoint is written.
restore_callback: A function taking a Python string, used to restore
state. Optional; defaults to doing nothing, in which case it is ignored
by status assertions such as assert_consumed().
"""
self._has_trivial_state_callback = (restore_callback is None)
def _state_callback_wrapper():
with ops.init_scope():
return state_callback()
self._state_callback = _state_callback_wrapper
self._restore_callback = restore_callback
with ops.device("/cpu:0"):
self._save_string = constant_op.constant("", dtype=dtypes.string)
spec = saveable_object.SaveSpec(
self._save_string, "", name, dtype=dtypes.string)
super(PythonStringStateSaveable, self).__init__(self._save_string, [spec],
name)
@property
def optional_restore(self):
"""For values with no restore, relaxes assert_consumed()."""
return self._has_trivial_state_callback
def feed_dict_additions(self):
"""When running a graph, indicates fresh state to feed."""
return {self._save_string: self._state_callback()}
def freeze(self):
"""Create a frozen `SaveableObject` which saves the current state."""
def _constant_state():
return constant_op.constant(self._state_callback(), dtype=dtypes.string)
return NoRestoreSaveable(
tensor=_constant_state,
dtype=dtypes.string,
name=self.name,
device="cpu:0")
def python_restore(self, restored_strings):
"""Called to restore Python state."""
if self._restore_callback:
restored, = restored_strings
self._restore_callback(restored)
def restore(self, restored_tensors, restored_shapes):
"""Called to restore TensorFlow state (nothing to do)."""
return control_flow_ops.no_op()
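# Illustrative sketch (for exposition only; the names below are hypothetical):
# saving a piece of volatile Python state as a string. `state_callback` is
# polled at each save, and `restore_callback` receives the saved string on
# restore.
def _python_state_saveable_sketch():
  state = {"value": "hello"}

  def _read_state():
    return state["value"]

  def _write_state(restored):
    state["value"] = restored

  return PythonStringStateSaveable(
      name="example_state",
      state_callback=_read_state,
      restore_callback=_write_state)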
class CheckpointPosition(object):
"""Indicates a position within a `_CheckpointRestoreCoordinator`."""
def __init__(self, checkpoint, proto_id):
"""Specify an object within a checkpoint.
Args:
checkpoint: A _CheckpointRestoreCoordinator object.
proto_id: The index of this object in TrackableObjectGraph.nodes.
"""
self._checkpoint = checkpoint
self._proto_id = proto_id
def restore(self, trackable):
"""Restore this value into `trackable`."""
with ops.init_scope():
if self.bind_object(trackable):
# This object's correspondence with a checkpointed object is new, so
# process deferred restorations for it and its dependencies.
restore_ops = trackable._restore_from_checkpoint_position(self) # pylint: disable=protected-access
if restore_ops:
self._checkpoint.new_restore_ops(restore_ops)
def bind_object(self, trackable):
"""Set a checkpoint<->object correspondence and process slot variables.
Args:
trackable: The object to record a correspondence for.
Returns:
True if this is a new assignment, False if this object has already been
mapped to a checkpointed `Object` proto.
Raises:
AssertionError: If another object is already bound to the `Object` proto.
"""
checkpoint = self.checkpoint
checkpoint.all_python_objects.add(trackable)
current_assignment = checkpoint.object_by_proto_id.get(self._proto_id, None)
checkpoint.matched_proto_ids.add(self._proto_id)
if current_assignment is None:
checkpoint.object_by_proto_id[self._proto_id] = trackable
for deferred_slot_restoration in (
checkpoint.deferred_slot_restorations.pop(self._proto_id, ())):
trackable._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=CheckpointPosition(
checkpoint=checkpoint,
proto_id=deferred_slot_restoration.slot_variable_id),
variable=deferred_slot_restoration.original_variable,
slot_name=deferred_slot_restoration.slot_name)
for slot_restoration in checkpoint.slot_restorations.pop(
self._proto_id, ()):
optimizer_object = checkpoint.object_by_proto_id.get(
slot_restoration.optimizer_id, None)
if optimizer_object is None:
# The optimizer has not yet been created or tracked. Record in the
# checkpoint that the slot variables need to be restored when it is.
checkpoint.deferred_slot_restorations.setdefault(
slot_restoration.optimizer_id, []).append(
_DeferredSlotVariableRestoration(
original_variable=trackable,
slot_variable_id=slot_restoration.slot_variable_id,
slot_name=slot_restoration.slot_name))
else:
optimizer_object._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=CheckpointPosition(
checkpoint=checkpoint,
proto_id=slot_restoration.slot_variable_id),
variable=trackable,
slot_name=slot_restoration.slot_name)
return True # New assignment
else:
# The object was already mapped for this checkpoint load, which means
# we don't need to do anything besides check that the mapping is
# consistent (if the dependency DAG is not a tree then there are
# multiple paths to the same object).
if current_assignment is not trackable:
logging.warning((
"Inconsistent references when loading the checkpoint into this "
"object graph. Either the Trackable object references in the "
"Python program have changed in an incompatible way, or the "
"checkpoint was generated in an incompatible program.\n\nTwo "
"checkpoint references resolved to different objects (%s and %s)."),
current_assignment, trackable)
return False # Not a new assignment
def is_simple_variable(self):
"""Determine whether this value is restorable with a Tensor initializer."""
attributes = self.object_proto.attributes
return (len(attributes) == 1 and
attributes[0].name == VARIABLE_VALUE_KEY and
not self.object_proto.children)
def value_tensors(self):
"""Create value `Tensor`s for this object's attributes.
Does not require that the Python object has been created. Used for
restore-on-create when executing eagerly.
Returns:
A dictionary mapping from object attribute names to `Tensor`s.
"""
value_tensors = {}
for serialized_tensor in self.object_proto.attributes:
checkpoint_key = serialized_tensor.checkpoint_key
dtype = self._checkpoint.dtype_map[checkpoint_key]
base_type = dtype.base_dtype
with ops.init_scope():
with ops.device("/cpu:0"):
# Run the restore itself on the CPU.
value, = io_ops.restore_v2(
prefix=self._checkpoint.save_path_tensor,
tensor_names=[checkpoint_key],
shape_and_slices=[""],
dtypes=[base_type],
name="%s_checkpoint_read" % (serialized_tensor.name,))
# Copy the value to the current device if necessary.
value_tensors[serialized_tensor.name] = array_ops.identity(value)
return value_tensors
def _gather_ops_or_named_saveables(self):
"""Looks up or creates SaveableObjects which don't have cached ops."""
saveables = self.trackable._gather_saveables_for_checkpoint() # pylint: disable=protected-access
# Name saveables based on the name this object had when it was checkpointed.
named_saveables = {}
python_saveables = []
existing_restore_ops = []
for serialized_tensor in self.object_proto.attributes:
if context.executing_eagerly():
existing_op = None
else:
existing_op = self._checkpoint.restore_ops_by_name.get(
serialized_tensor.checkpoint_key, None)
if existing_op is not None:
existing_restore_ops.append(existing_op)
continue
# Only if we don't have cached ops for this SaveableObject, we'll see if
# the SaveableObject itself has been cached. If not, we'll make it, and
# either way we'll extract new ops from it (or if it has Python state to
# restore, we'll run that).
saveables_cache = self._checkpoint.graph_view.saveables_cache
if saveables_cache is None:
# No SaveableObject caching when executing eagerly.
saveable = None
else:
# If we've already created and cached a SaveableObject for this
# attribute, we can re-use it to avoid re-creating some ops when graph
# building.
saveable_list = saveables_cache.get(self.trackable,
{}).get(serialized_tensor.name,
(None,))
if len(saveable_list) == 1:
# Almost every attribute will have exactly one SaveableObject.
saveable, = saveable_list
else:
# Don't use cached SaveableObjects for partitioned variables, which is
# the only case where we'd have a list of SaveableObjects. Op caching
# will catch them.
saveable = None
if saveable is not None:
# The name of this attribute has changed, so we need to re-generate
# the SaveableObject.
if serialized_tensor.checkpoint_key not in saveable.name:
saveable = None
del saveables_cache[self.trackable]
break
if saveable is None:
# If there was no cached SaveableObject, we should check if the Python
# object has the attribute.
saveable_factory = saveables.get(serialized_tensor.name, None)
if saveable_factory is None:
# Purposefully does not throw an exception if attributes have been
# added or deleted. Stores unused attributes so an exception can be
# raised if the user decides to check that everything in the
# checkpoint was loaded.
if not serialized_tensor.optional_restore:
self._checkpoint.unused_attributes.setdefault(
self._proto_id, []).append(serialized_tensor.name)
continue
if callable(saveable_factory):
saveable = saveable_factory(name=serialized_tensor.checkpoint_key)
else:
saveable = saveable_factory
if saveables_cache is not None:
saveables_cache.setdefault(self.trackable,
{})[serialized_tensor.name] = [saveable]
if isinstance(saveable, PythonStateSaveable):
python_saveables.append(saveable)
else:
named_saveables[serialized_tensor.checkpoint_key] = saveable
return existing_restore_ops, named_saveables, python_saveables
def restore_ops(self):
"""Create or fetch restore ops for this object's attributes.
Requires that the `Trackable` Python object has been bound to an object
ID in the checkpoint.
Returns:
A list of operations when graph building, or an empty list when executing
eagerly.
"""
(restore_ops, tensor_saveables,
python_saveables) = self._gather_ops_or_named_saveables()
restore_ops.extend(
self._checkpoint.restore_saveables(tensor_saveables, python_saveables))
return restore_ops
@property
def checkpoint(self):
return self._checkpoint
@property
def trackable(self):
return self._checkpoint.object_by_proto_id[self._proto_id]
@property
def object_proto(self):
return self._checkpoint.object_graph_proto.nodes[self._proto_id]
@property
def restore_uid(self):
return self._checkpoint.restore_uid
def __repr__(self):
return repr(self.object_proto)
_DeferredSlotVariableRestoration = collections.namedtuple(
"_DeferredSlotVariableRestoration", [
"original_variable",
"slot_variable_id",
"slot_name",
])
_SlotVariableRestoration = collections.namedtuple(
"_SlotVariableRestoration",
[
# The checkpoint proto id of the optimizer object.
"optimizer_id",
# The checkpoint proto id of the slot variable.
"slot_variable_id",
"slot_name",
])
def no_automatic_dependency_tracking(method):
"""Disables automatic dependency tracking on attribute assignment.
Use to decorate any method of a Trackable object. Attribute assignment in
that method will not add dependencies (also respected in Model). Harmless if
used in a class which does not do automatic dependency tracking (which means
it's safe to use in base classes which may have subclasses which also inherit
from Trackable).
Args:
method: The method to decorate.
Returns:
A decorated method which sets and un-sets automatic dependency tracking for
the object the method is called on (not thread safe).
"""
def _method_wrapper(self, *args, **kwargs):
previous_value = getattr(self, "_self_setattr_tracking", True)
self._self_setattr_tracking = False # pylint: disable=protected-access
try:
result = method(self, *args, **kwargs)
finally:
self._self_setattr_tracking = previous_value # pylint: disable=protected-access
return result
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
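# Illustrative sketch (for exposition only): the decorator is typically
# applied to methods of classes whose `__setattr__` performs dependency
# tracking (e.g. `AutoTrackable` or Keras `Model`, defined elsewhere).
# `_ScratchHolder` is a hypothetical example class.
class _ScratchHolder(object):

  @no_automatic_dependency_tracking
  def build(self):
    # With tracking disabled, a tracking-aware subclass would store this
    # plain list without wrapping it in a trackable data structure.
    self._scratch = []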
@tf_contextlib.contextmanager
def no_automatic_dependency_tracking_scope(obj):
"""A context that disables automatic dependency tracking when assigning attrs.
  Objects that inherit from AutoTrackable automatically create dependencies
  on trackable objects through attribute assignments, and wrap data
  structures (lists or dicts) in trackable classes. This scope may be used
  to temporarily disable this behavior. It works similarly to the decorator
  `no_automatic_dependency_tracking`.
Example usage:
```
model = tf.keras.Model()
model.arr1 = [] # Creates a ListWrapper object
with no_automatic_dependency_tracking_scope(model):
model.arr2 = [] # Creates a regular, untracked python list
```
Args:
obj: A trackable object.
Yields:
a scope in which the object doesn't track dependencies.
"""
previous_value = getattr(obj, "_setattr_tracking", True)
obj._setattr_tracking = False # pylint: disable=protected-access
try:
yield
finally:
obj._setattr_tracking = previous_value # pylint: disable=protected-access
class Trackable(object):
"""Base class for `Trackable` objects without automatic dependencies.
This class has no __setattr__ override for performance reasons. Dependencies
must be added explicitly. Unless attribute assignment is performance-critical,
use `AutoTrackable` instead. Use `Trackable` for `isinstance`
checks.
"""
# For compatibility with wrapt.ObjectProxy, attributes are all prefixed with
# _self_. We have some properties to forward semi-public attributes to their
# _self_ equivalents.
@property
def _setattr_tracking(self):
if not hasattr(self, "_self_setattr_tracking"):
self._self_setattr_tracking = True
return self._self_setattr_tracking
@_setattr_tracking.setter
def _setattr_tracking(self, value):
self._self_setattr_tracking = value
@property
def _update_uid(self):
return self._self_update_uid
@_update_uid.setter
def _update_uid(self, value):
self._self_update_uid = value
@property
def _unconditional_checkpoint_dependencies(self):
return self._self_unconditional_checkpoint_dependencies
@property
def _unconditional_dependency_names(self):
return self._self_unconditional_dependency_names
@property
def _name_based_restores(self):
return self._self_name_based_restores
# Trackable does not do automatic dependency tracking, but uses the
# no_automatic_dependency_tracking decorator so it can avoid adding
# dependencies if a subclass is Trackable / inherits from Model (both of
# which have __setattr__ overrides).
@no_automatic_dependency_tracking
def _maybe_initialize_trackable(self):
"""Initialize dependency management.
Not __init__, since most objects will forget to call it.
"""
if hasattr(self, "_self_unconditional_checkpoint_dependencies"):
# __init__ already called. This check means that we don't need
# Trackable.__init__() in the constructor of every TensorFlow object.
return
# A list of TrackableReference objects. Some classes implementing
# `Trackable`, notably `Optimizer`s, may override the
# _checkpoint_dependencies property with conditional dependencies
# (e.g. based on the current graph when saving).
self._self_unconditional_checkpoint_dependencies = []
# Maps names -> Trackable objects
self._self_unconditional_dependency_names = {}
# Restorations for other Trackable objects on which this object may
# eventually depend. Maps local name -> CheckpointPosition list. Optimizers
# tack on conditional dependencies, and so need separate management of
# deferred dependencies too.
self._self_unconditional_deferred_dependencies = {}
# The UID of the highest assignment to this object. Used to ensure that the
# last requested assignment determines the final value of an object.
if hasattr(self, "_self_update_uid"):
raise AssertionError(
"Internal error: the object had an update UID set before its "
"initialization code was run.")
self._self_update_uid = -1
# When executing eagerly, holds a collection of _NameBasedRestoreCoordinator
# instances, which should be checked when creating variables or other
# saveables. These are passed on recursively to all dependencies, since
# unlike object-based checkpoint restores we don't know which subgraph is
# being restored in advance. This mechanism is only necessary for
# restore-on-create when executing eagerly, and so is unused when graph
# building.
self._self_name_based_restores = set()
@property
def _object_identifier(self):
"""String used to identify this object in a SavedModel.
Generally, the object identifier is constant across objects of the same
class, while the metadata field is used for instance-specific data.
Returns:
String object identifier.
"""
return "_generic_user_object"
@property
def _tracking_metadata(self):
"""String containing object metadata, which is saved to the SavedModel."""
return ""
def _no_dependency(self, value):
"""If automatic dependency tracking is enabled, ignores `value`."""
return value
def _name_based_attribute_restore(self, checkpoint):
"""Restore the object's attributes from a name-based checkpoint."""
self._self_name_based_restores.add(checkpoint)
if self._self_update_uid < checkpoint.restore_uid:
checkpoint.eager_restore(self)
self._self_update_uid = checkpoint.restore_uid
@property
def _checkpoint_dependencies(self):
"""All dependencies of this object.
May be overridden to include conditional dependencies.
Returns:
A list of `TrackableReference` objects indicating named
`Trackable` dependencies which should be saved along with this
object.
"""
return self._self_unconditional_checkpoint_dependencies
@property
def _deferred_dependencies(self):
"""A dictionary with deferred dependencies.
Stores restorations for other Trackable objects on which this object
may eventually depend. May be overridden by sub-classes (e.g. Optimizers use
    conditional dependencies based on the current graph, and so need separate
management of deferred dependencies too).
Returns:
A dictionary mapping from local name to a list of CheckpointPosition
objects.
"""
return self._self_unconditional_deferred_dependencies
def _lookup_dependency(self, name):
"""Look up a dependency by name.
May be overridden to include conditional dependencies.
Args:
name: The local name of the dependency.
Returns:
A `Trackable` object, or `None` if no dependency by this name was
found.
"""
return self._self_unconditional_dependency_names.get(name, None)
def _add_variable_with_custom_getter(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
getter=None,
overwrite=False,
**kwargs_for_getter):
"""Restore-on-create for a variable be saved with this `Trackable`.
If the user has requested that this object or another `Trackable` which
depends on this object be restored from a checkpoint (deferred loading
before variable object creation), `initializer` may be ignored and the value
from the checkpoint used instead.
Args:
name: A name for the variable. Must be unique within this object.
shape: The shape of the variable.
dtype: The data type of the variable.
initializer: The initializer to use. Ignored if there is a deferred
restoration left over from a call to
`_restore_from_checkpoint_position`.
getter: The getter to wrap which actually fetches the variable.
overwrite: If True, disables unique name and type checks.
**kwargs_for_getter: Passed to the getter.
Returns:
The new variable object.
Raises:
ValueError: If the variable name is not unique.
"""
self._maybe_initialize_trackable()
with ops.init_scope():
if context.executing_eagerly():
# If this is a variable with a single Tensor stored in the checkpoint,
# we can set that value as an initializer rather than initializing and
# then assigning (when executing eagerly). This call returns None if
# there is nothing to restore.
checkpoint_initializer = self._preload_simple_restoration(
name=name, shape=shape)
else:
checkpoint_initializer = None
if (checkpoint_initializer is not None and
not (isinstance(initializer, CheckpointInitialValue) and
(initializer.restore_uid > checkpoint_initializer.restore_uid))):
# If multiple Trackable objects are "creating" the same variable
# via the magic of custom getters, the one with the highest restore UID
# (the one called last) has to make the final initializer. If another
# custom getter interrupts this process by overwriting the initializer,
# then we'll catch that when we call _track_trackable. So this is
# "best effort" to set the initializer with the highest restore UID.
initializer = checkpoint_initializer
shape = None
new_variable = getter(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
**kwargs_for_getter)
# If we set an initializer and the variable processed it, tracking will not
# assign again. It will add this variable to our dependencies, and if there
# is a non-trivial restoration queued, it will handle that. This also
# handles slot variables.
if not overwrite or isinstance(new_variable, Trackable):
return self._track_trackable(new_variable, name=name, overwrite=overwrite)
else:
# TODO(allenl): Some variable types are not yet supported. Remove this
# fallback once all get_variable() return types are Trackable.
return new_variable
def _preload_simple_restoration(self, name, shape):
"""Return a dependency's value for restore-on-create.
Note the restoration is not deleted; if for some reason preload is called
and then not assigned to the variable (for example because a custom getter
overrides the initializer), the assignment will still happen once the
variable is tracked (determined based on checkpoint.restore_uid).
Args:
name: The object-local name of the dependency holding the variable's
value.
shape: The shape of the variable being loaded into.
Returns:
      A callable for use as a variable's initializer/initial_value, or None if
one should not be set (either because there was no variable with this name
in the checkpoint or because it needs more complex deserialization). Any
non-trivial deserialization will happen when the variable object is
tracked.
"""
deferred_dependencies_list = self._deferred_dependencies.get(name, ())
if not deferred_dependencies_list:
# Nothing to do; we don't have a restore for this dependency queued up.
return
for checkpoint_position in deferred_dependencies_list:
if not checkpoint_position.is_simple_variable():
# If _any_ pending restoration is too complicated to fit in an
# initializer (because it has dependencies, or because there are
# multiple Tensors to restore), bail and let the general tracking code
# handle it.
return None
checkpoint_position = max(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid)
return CheckpointInitialValue(
checkpoint_position=checkpoint_position, shape=shape)
def _track_trackable(self, trackable, name, overwrite=False):
"""Declare a dependency on another `Trackable` object.
Indicates that checkpoints for this object should include variables from
`trackable`.
Variables in a checkpoint are mapped to `Trackable`s based on the names
provided when the checkpoint was written. To avoid breaking existing
checkpoints when modifying a class, neither variable names nor dependency
names (the names passed to `_track_trackable`) may change.
Args:
trackable: A `Trackable` which this object depends on.
name: A local name for `trackable`, used for loading checkpoints into the
correct objects.
overwrite: Boolean, whether silently replacing dependencies is OK. Used
for __setattr__, where throwing an error on attribute reassignment would
be inappropriate.
Returns:
`trackable`, for convenience when declaring a dependency and
assigning to a member variable in one statement.
Raises:
TypeError: If `trackable` does not inherit from `Trackable`.
ValueError: If another object is already tracked by this name.
"""
self._maybe_initialize_trackable()
if not isinstance(trackable, Trackable):
raise TypeError(("Trackable._track_trackable() passed type %s, not a "
"Trackable.") % (type(trackable),))
new_reference = TrackableReference(name=name, ref=trackable)
current_object = self._lookup_dependency(name)
if (current_object is not None and current_object is not trackable):
if not overwrite:
raise ValueError(
("Called Trackable._track_trackable() with name='%s', "
"but a Trackable with this name is already declared as a "
"dependency. Names must be unique (or overwrite=True).") % (name,))
# This is a weird thing to do, but we're not going to stop people from
# using __setattr__.
for index, (old_name, _) in enumerate(
self._self_unconditional_checkpoint_dependencies):
if name == old_name:
self._self_unconditional_checkpoint_dependencies[
index] = new_reference
elif current_object is None:
self._self_unconditional_checkpoint_dependencies.append(new_reference)
self._handle_deferred_dependencies(name=name, trackable=trackable)
self._self_unconditional_dependency_names[name] = trackable
return trackable
def _handle_deferred_dependencies(self, name, trackable):
"""Pop and load any deferred checkpoint restores into `trackable`.
This method does not add a new dependency on `trackable`, but it does
check if any outstanding/deferred dependencies have been queued waiting for
this dependency to be added (matched based on `name`). If so,
`trackable` and its dependencies are restored. The restorations are
considered fulfilled and so are deleted.
`_track_trackable` is more appropriate for adding a
normal/unconditional dependency, and includes handling for deferred
restorations. This method allows objects such as `Optimizer` to use the same
restoration logic while managing conditional dependencies themselves, by
overriding `_checkpoint_dependencies` and `_lookup_dependency` to change the
object's dependencies based on the context it is saved/restored in (a single
optimizer instance can have state associated with multiple graphs).
Args:
name: The name of the dependency within this object (`self`), used to
match `trackable` with values saved in a checkpoint.
trackable: The Trackable object to restore (inheriting from `Trackable`).
"""
self._maybe_initialize_trackable()
trackable._maybe_initialize_trackable() # pylint: disable=protected-access
deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
for checkpoint_position in sorted(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid,
reverse=True):
checkpoint_position.restore(trackable)
# Pass on any name-based restores queued in this object.
for name_based_restore in sorted(
self._self_name_based_restores,
key=lambda checkpoint: checkpoint.restore_uid,
reverse=True):
trackable._name_based_attribute_restore(name_based_restore) # pylint: disable=protected-access
def _restore_from_checkpoint_position(self, checkpoint_position):
"""Restore this object and its dependencies (may be deferred)."""
# Attempt a breadth-first traversal, since presumably the user has more
# control over shorter paths. If we don't have all of the dependencies at
# this point, the end result is not breadth-first (since other deferred
# traversals will happen later).
visit_queue = collections.deque([checkpoint_position])
restore_ops = []
while visit_queue:
current_position = visit_queue.popleft()
restore_ops.extend(
nest.flatten(current_position.trackable # pylint: disable=protected-access
._single_restoration_from_checkpoint_position(
checkpoint_position=current_position,
visit_queue=visit_queue)))
return restore_ops
def _single_restoration_from_checkpoint_position(self, checkpoint_position,
visit_queue):
"""Restore this object, and either queue its dependencies or defer them."""
self._maybe_initialize_trackable()
checkpoint = checkpoint_position.checkpoint
# If the UID of this restore is lower than our current update UID, we don't
# need to actually restore the object. However, we should pass the
# restoration on to our dependencies.
if checkpoint.restore_uid > self._self_update_uid:
restore_ops = checkpoint_position.restore_ops()
self._self_update_uid = checkpoint.restore_uid
else:
restore_ops = ()
for child in checkpoint_position.object_proto.children:
child_position = CheckpointPosition(
checkpoint=checkpoint, proto_id=child.node_id)
local_object = self._lookup_dependency(child.local_name)
if local_object is None:
# We don't yet have a dependency registered with this name. Save it
# in case we do.
self._deferred_dependencies.setdefault(child.local_name,
[]).append(child_position)
else:
if child_position.bind_object(trackable=local_object):
# This object's correspondence is new, so dependencies need to be
# visited. Delay doing it so that we get a breadth-first dependency
# resolution order (shallowest paths first). The caller is responsible
# for emptying visit_queue.
visit_queue.append(child_position)
return restore_ops
def _gather_saveables_for_checkpoint(self):
"""Returns a dictionary of values to checkpoint with this object.
Keys in the returned dictionary are local to this object and in a separate
namespace from dependencies. Values may either be `SaveableObject` factories
or variables easily converted to `SaveableObject`s (as in
`tf.compat.v1.train.Saver`'s
`var_list` constructor argument).
`SaveableObjects` have a name set, which Trackable needs to generate
itself. So rather than returning `SaveableObjects` directly, this method
should return a dictionary of callables which take `name` arguments and
return `SaveableObjects` with that name.
If this object may also be passed to the global-name-based
`tf.compat.v1.train.Saver`,
the returned callables should have a default value for their name argument
(i.e. be callable with no arguments).
Returned values must be saved only by this object; if any value may be
shared, it should instead be a dependency. For example, variable objects
save their own values with the key `VARIABLE_VALUE_KEY`, but objects which
reference variables simply add a dependency.
Returns:
The dictionary mapping attribute names to `SaveableObject` factories
described above. For example:
{VARIABLE_VALUE_KEY:
lambda name="global_name_for_this_object":
SaveableObject(name=name, ...)}
"""
return {}
def _list_extra_dependencies_for_serialization(self, serialization_cache):
"""Lists extra dependencies to serialize.
Internal sub-classes can override this method to return extra dependencies
that should be saved with the object during SavedModel serialization. For
example, this is used to save `trainable_variables` in Keras models. The
python property `trainable_variables` contains logic to iterate through the
weights from the model and its sublayers. The serialized Keras model saves
`trainable_weights` as a trackable list of variables.
PLEASE NOTE when overriding this method:
1. This function may only generate new trackable objects the first time it
is called.
2. The returned dictionary must not have naming conflicts with
dependencies tracked by the root. In other words, if the root is
       tracking `object_1` with name 'x', and this function returns
`{'x': object_2}`, an error is raised when saving.
Args:
serialization_cache: A dictionary shared between all objects in the same
object graph. This object is passed to both
`_list_extra_dependencies_for_serialization` and
`_list_functions_for_serialization`.
Returns:
A dictionary mapping attribute names to trackable objects.
"""
del serialization_cache
return dict()
def _list_functions_for_serialization(self, serialization_cache):
"""Lists the functions of this trackable to serialize.
Internal sub-classes can override this with specific logic. E.g.
    `AutoTrackable` provides an implementation that returns the attributes
    which are functions.
Args:
serialization_cache: Dictionary passed to all objects in the same object
graph during serialization.
Returns:
A dictionary mapping attribute names to `Function` or
`ConcreteFunction`.
"""
del serialization_cache
return dict()
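# Illustrative sketch (for exposition only, using internal names that may
# change): because `Trackable` does not override `__setattr__`, dependencies
# must be declared explicitly with `_track_trackable`.
class _ExplicitDependencySketch(Trackable):

  def __init__(self, child):
    # `child` is assumed to be another `Trackable` instance. The explicit
    # initialization call is idempotent; `_track_trackable` would also
    # trigger it.
    self._maybe_initialize_trackable()
    self.child = self._track_trackable(child, name="child")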
|
tensorflow-master
|
tensorflow/python/training/tracking/base.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for the functional saver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import test
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
class SaverTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_resource_variable(self):
v1 = resource_variable_ops.ResourceVariable(2.)
self.evaluate(v1.initializer)
saver = functional_saver._SingleDeviceSaver(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(2, len(gfile.Glob(prefix + "*")))
self.evaluate(v1.assign(1.))
self.evaluate(saver.restore(prefix))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
self.evaluate(v2.initializer)
second_saver = functional_saver._SingleDeviceSaver(
saveable_object_util.saveable_objects_for_op(v2, "x"))
self.evaluate(second_saver.restore(prefix))
self.assertEqual(2., self.evaluate(v2))
def test_to_proto(self):
v1 = resource_variable_ops.ResourceVariable(2.)
saver = functional_saver.MultiDeviceSaver(
saveable_object_util.saveable_objects_for_op(v1, "x"))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
proto_accumulator = []
wrapped = wrap_function.wrap_function(
lambda: proto_accumulator.append(saver.to_proto()), signature=())
self.assertEqual(1, len(proto_accumulator))
proto = proto_accumulator[0]
save = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
fetches=wrapped.graph.get_tensor_by_name(proto.save_tensor_name))
restore = wrapped.prune(
feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
fetches=wrapped.graph.get_operation_by_name(proto.restore_op_name))
save_path = save(constant_op.constant(prefix))
v1.assign(1.)
restore(constant_op.constant(save_path))
self.assertEqual(2., self.evaluate(v1))
v2 = resource_variable_ops.ResourceVariable(3.)
second_saver = functional_saver.MultiDeviceSaver(
saveable_object_util.saveable_objects_for_op(v2, "x"))
second_saver.restore(save_path)
self.assertEqual(2., self.evaluate(v2))
@test_util.run_v1_only(
"Needs an API to setup multiple devices, b/124805129")
# Set up multiple devices when graph building. Before test.main() we configure
# the devices for eager execution.
@test_util.run_in_graph_and_eager_modes(
config=config_pb2.ConfigProto(device_count={"CPU": 3}))
def test_checkpoint_is_sharded_by_device(self):
with ops.device("cpu:0"):
v0 = resource_variable_ops.ResourceVariable(0.)
with ops.device("cpu:1"):
v1 = resource_variable_ops.ResourceVariable(1.)
with ops.device("cpu:2"):
v2 = resource_variable_ops.ResourceVariable(2.)
self.evaluate([v0.initializer, v1.initializer, v2.initializer])
saver = functional_saver.MultiDeviceSaver(
list(saveable_object_util.saveable_objects_for_op(v0, "v0"))
+ list(saveable_object_util.saveable_objects_for_op(v1, "v1"))
+ list(saveable_object_util.saveable_objects_for_op(v2, "v2")))
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(saver.save(constant_op.constant(prefix)))
self.assertEqual(4, len(gfile.Glob(prefix + "*")))
self.evaluate(v0.assign(-1.))
self.evaluate(v1.assign(-1.))
self.evaluate(v2.assign(-1.))
self.evaluate(saver.restore(constant_op.constant(prefix)))
self.assertEqual(0., self.evaluate(v0))
self.assertEqual(1., self.evaluate(v1))
self.assertEqual(2., self.evaluate(v2))
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3}))
test.main()
|
tensorflow-master
|
tensorflow/python/training/saving/functional_saver_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Saves and restore variables inside traced @tf.functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import uuid
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.saving import saveable_object_util
from tensorflow.python.util import nest
class _SingleDeviceSaver(object):
"""Saves and restores checkpoints from the current device."""
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
"""
saveable_objects = list(saveable_objects)
for saveable in saveable_objects:
if not isinstance(saveable, saveable_object.SaveableObject):
raise ValueError(
"Expected a list of SaveableObjects, got %s." % (saveable,))
self._saveable_objects = saveable_objects
def save(self, file_prefix):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
Returns:
An `Operation`, or None when executing eagerly.
"""
tensor_names = []
tensors = []
tensor_slices = []
for saveable in self._saveable_objects:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
with ops.device("cpu:0"):
return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)
def restore(self, file_prefix):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
Returns:
A dictionary mapping from SaveableObject names to restore operations.
"""
restore_specs = []
tensor_structure = []
for saveable in self._saveable_objects:
saveable_tensor_structure = []
tensor_structure.append(saveable_tensor_structure)
for spec in saveable.specs:
saveable_tensor_structure.append(spec.name)
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
tensor_names, tensor_slices, tensor_dtypes = zip(*restore_specs)
with ops.device("cpu:0"):
restored_tensors = io_ops.restore_v2(
file_prefix, tensor_names, tensor_slices, tensor_dtypes)
structured_restored_tensors = nest.pack_sequence_as(
tensor_structure, restored_tensors)
restore_ops = {}
for saveable, restored_tensors in zip(self._saveable_objects,
structured_restored_tensors):
restore_ops[saveable.name] = saveable.restore(
restored_tensors, restored_shapes=None)
return restore_ops
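# Illustrative sketch (for exposition only): round-tripping SaveableObjects
# through `_SingleDeviceSaver`. Assumes an eager context and a writable
# `prefix` path; defined here only as a usage sketch.
def _single_device_saver_sketch(saveables, prefix):
  saver = _SingleDeviceSaver(saveables)
  saver.save(constant_op.constant(prefix))  # Writes index and data files.
  return saver.restore(constant_op.constant(prefix))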
def sharded_filename(filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
class MultiDeviceSaver(object):
"""Saves checkpoints directly from multiple devices.
Note that this is a low-level utility which stores Tensors in the keys
specified by `SaveableObject`s. Higher-level utilities for object-based
checkpointing are built on top of it.
"""
def __init__(self, saveable_objects):
"""Specify a list of `SaveableObject`s to save and restore.
Args:
saveable_objects: A list of `SaveableObject`s.
"""
saveable_objects = list(saveable_objects)
saveables_by_device = {}
for saveable in saveable_objects:
if not isinstance(saveable, saveable_object.SaveableObject):
raise ValueError(
"Expected a dictionary of SaveableObjects, got {}."
.format(saveable))
saveables_by_device.setdefault(saveable.device, []).append(saveable)
self._single_device_savers = {
device: _SingleDeviceSaver(saveables)
for device, saveables in saveables_by_device.items()}
def to_proto(self):
"""Serializes to a SaverDef referencing the current graph."""
filename_tensor = array_ops.placeholder(
shape=[], dtype=dtypes.string, name="saver_filename")
save_tensor = self._traced_save(filename_tensor)
restore_op = self._traced_restore(filename_tensor).op
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
version=saver_pb2.SaverDef.V2)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_save(self, file_prefix):
save_op = self.save(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies([save_op]):
return array_ops.identity(file_prefix)
@def_function.function(
input_signature=(tensor_spec.TensorSpec(shape=(), dtype=dtypes.string),),
autograph=False)
def _traced_restore(self, file_prefix):
restore_ops = self.restore(file_prefix)
with ops.device("cpu:0"):
with ops.control_dependencies(restore_ops.values()):
return array_ops.identity(file_prefix)
def save(self, file_prefix):
"""Save the saveable objects to a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix to
save under.
Returns:
An `Operation`, or None when executing eagerly.
"""
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><sharded_suffix>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
sharded_suffix = "_temp_%s/part" % uuid.uuid4().hex
with ops.device("cpu:0"):
tmp_checkpoint_prefix = string_ops.string_join(
[file_prefix, sharded_suffix])
num_shards = len(self._single_device_savers)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saver) in enumerate(
sorted(self._single_device_savers.items())):
last_device = device
with ops.device(saveable_object_util.set_cpu0(device)):
shard_prefix = sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(shard_prefix)
with ops.device(device):
# _SingleDeviceSaver will use the CPU device when necessary, but initial
# read operations should be placed on the SaveableObject's device.
sharded_saves.append(saver.save(shard_prefix))
with ops.control_dependencies(sharded_saves):
# Co-locates the merge step with the last device.
with ops.device(saveable_object_util.set_cpu0(last_device)):
# V2 format write path consists of a metadata merge step. Once merged,
# attempts to delete the temporary directory, "<user-fed prefix>_temp".
return gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, file_prefix, delete_old_dirs=True)
def restore(self, file_prefix):
"""Restore the saveable objects from a checkpoint with `file_prefix`.
Args:
file_prefix: A string or scalar string Tensor containing the prefix for
files to read from.
Returns:
A dictionary mapping from SaveableObject names to restore operations.
"""
restore_ops = {}
# Sort by device name to avoid propagating non-deterministic dictionary
# ordering in some Python versions.
for device, saver in sorted(self._single_device_savers.items()):
with ops.device(device):
restore_ops.update(saver.restore(file_prefix))
return restore_ops
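# Hedged usage sketch (not part of the original file): a save/restore round
# trip. `my_saveables` is a hypothetical list of SaveableObjects, e.g. built
# with saveable_object_util helpers.
def _example_multi_device_saver(my_saveables):
  saver = MultiDeviceSaver(my_saveables)
  save_op = saver.save("/tmp/ckpt/myckpt")         # writes myckpt.index etc.
  restore_ops = saver.restore("/tmp/ckpt/myckpt")  # {saveable name: op}
  return save_op, restore_ops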
|
tensorflow-master
|
tensorflow/python/training/saving/functional_saver.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Types for specifying saving and loading behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class SaveSpec(object):
"""Class used to describe tensor slices that need to be saved."""
def __init__(self, tensor, slice_spec, name, dtype=None, device=None):
"""Creates a `SaveSpec` object.
Args:
tensor: the tensor to save or callable that produces a tensor to save.
slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
name: the name to save the tensor under.
dtype: The data type of the Tensor. Required if `tensor` is callable.
Used for error checking in the restore op.
device: The device generating and consuming this tensor. Required if
`tensor` is callable. Used to group objects to save by device.
"""
self._tensor = tensor
self.slice_spec = slice_spec
self.name = name
if callable(self._tensor):
if dtype is None or device is None:
raise AssertionError(
"When passing a callable `tensor` to a SaveSpec, an explicit "
"dtype and device must be provided.")
self.dtype = dtype
self.device = device
else:
self.dtype = tensor.dtype
self.device = tensor.device
@property
def tensor(self):
return self._tensor() if callable(self._tensor) else self._tensor
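# Hedged sketch (not part of the original file): a deferred SaveSpec. Passing
# a callable postpones reading the tensor until save time, which is why
# `dtype` and `device` must be given explicitly. `var` is a hypothetical
# resource variable.
def _example_deferred_save_spec(var):
  return SaveSpec(
      tensor=var.read_value,  # called lazily by the `tensor` property
      slice_spec="",
      name="my_value",
      dtype=var.dtype,
      device=var.device)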
class SaveableObject(object):
"""Base class for saving and restoring saveable objects."""
def __init__(self, op, specs, name):
"""Creates a `SaveableObject` object.
Args:
op: the "producer" object that this class wraps; it produces a list of
tensors to save. E.g., a "Variable" object saving its backing tensor.
specs: a list of SaveSpec, each element of which describes one tensor to
save under this object. All Tensors must be on the same device.
name: the name to save the object under.
"""
self.op = op
self.specs = specs
self.name = name
@property
def optional_restore(self):
"""A hint to restore assertions that this object is optional."""
return False # Default to required
@property
def device(self):
"""The device for SaveSpec Tensors."""
return self.specs[0].device
def restore(self, restored_tensors, restored_shapes):
"""Restores this object from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint
restored_shapes: the shapes this object should conform to after
restore, or None.
Returns:
An operation that restores the state of the object.
Raises:
ValueError: If the object cannot be restored using the provided
parameters.
"""
# pylint: disable=unused-argument
raise ValueError("Calling an abstract method.")
|
tensorflow-master
|
tensorflow/python/training/saving/saveable_object.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for working with and creating SaveableObjects."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.saving import saveable_object
from tensorflow.python.training.tracking import base as trackable
# Op names which identify variable reads which should be saved.
_VARIABLE_OPS = set(["Variable",
"VariableV2",
"AutoReloadVariable",
"VarHandleOp",
"ReadVariableOp"])
def set_cpu0(device_string):
"""Creates a new device string based on `device_string` but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device = parsed_device.replace(device_type="CPU", device_index=0)
return parsed_device.to_string()
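# Illustrative sketch (not part of the original file); the device strings are
# assumptions about typical formats:
def _example_set_cpu0():
  gpu = set_cpu0("/job:worker/replica:0/task:1/device:GPU:3")
  # ==> "/job:worker/replica:0/task:1/device:CPU:0"
  cpu = set_cpu0("/device:CPU:0")  # already on CPU:0, so unchanged
  return gpu, cpu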
class ReferenceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles reference variables."""
def __init__(self, var, slice_spec, name):
spec = saveable_object.SaveSpec(var, slice_spec, name, dtype=var.dtype)
super(ReferenceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
return state_ops.assign(
self.op,
restored_tensor,
validate_shape=restored_shapes is None and
self.op.get_shape().is_fully_defined())
class ResourceVariableSaveable(saveable_object.SaveableObject):
"""SaveableObject implementation that handles ResourceVariables."""
def __init__(self, var, slice_spec, name):
self._var_device = var.device
self._var_shape = var.shape
if isinstance(var, ops.Tensor):
self.handle_op = var.op.inputs[0]
tensor = var
elif resource_variable_ops.is_resource_variable(var):
def _read_variable_closure(v):
def f():
with ops.device(v.device):
x = v.read_value()
# To allow variables placed on non-CPU devices to be checkpointed,
# we copy them to CPU on the same machine first.
with ops.device("/device:CPU:0"):
return array_ops.identity(x)
return f
self.handle_op = var.handle
tensor = _read_variable_closure(var)
else:
raise ValueError(
"Saveable is neither a resource variable nor a read operation."
" Got: %s" % repr(var))
spec = saveable_object.SaveSpec(tensor, slice_spec, name,
dtype=var.dtype, device=var.device)
super(ResourceVariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
# Copy the restored tensor to the variable's device.
with ops.device(self._var_device):
restored_tensor = array_ops.identity(restored_tensor)
return resource_variable_ops.shape_safe_assign_variable_handle(
self.handle_op, self._var_shape, restored_tensor)
def _tensor_comes_from_variable(v):
return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS
def saveable_objects_for_op(op, name):
"""Create `SaveableObject`s from an operation.
Args:
op: A variable, operation, or SaveableObject to coerce into a
SaveableObject.
name: A string name for the SaveableObject.
Yields:
`SaveableObject`s which together save/restore `op`.
Raises:
TypeError: If `name` is not a string.
ValueError: For operations with no known conversion to SaveableObject.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"names_to_saveables must be a dict mapping string names to "
"trackable operations. Name is not a string: %s" % name)
if isinstance(op, saveable_object.SaveableObject):
yield op
elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
if isinstance(op, variables.PartitionedVariable):
op = list(op)
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in op:
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.full_name
elif slice_name != variable._save_slice_info.full_name:
raise ValueError(
"Slices must all be from the same tensor: %s != %s" %
(slice_name, variable._save_slice_info.full_name))
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(
variable, variable._save_slice_info.spec, name)
else:
yield ResourceVariableSaveable(
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
elif isinstance(op, trackable.Trackable) and not isinstance(
op, variables.Variable):
# pylint: disable=protected-access
for attr, factory in op._gather_saveables_for_checkpoint().items():
if attr == trackable.VARIABLE_VALUE_KEY:
# Keep original name for classes masquerading as variables.
full_name = name
else:
full_name = name + "_" + attr
op = (factory(full_name) if callable(factory) else factory)
for op in saveable_objects_for_op(op, op.name):
yield op
# pylint: enable=protected-access
else:
# A variable or tensor.
if isinstance(op, resource_variable_ops.BaseResourceVariable):
# pylint: disable=protected-access
if op._in_graph_mode:
variable = op._graph_element
else:
variable = op
# pylint: enable=protected-access
yield ResourceVariableSaveable(variable, "", name)
else:
if context.executing_eagerly():
raise ValueError("Can only save/restore ResourceVariables when "
"executing eagerly, got type: %s." % type(op))
variable = ops.internal_convert_to_tensor(op, as_ref=True)
if not _tensor_comes_from_variable(variable):
raise TypeError("names_to_saveables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield ReferenceVariableSaveable(variable, "", name)
else:
yield ResourceVariableSaveable(
variable, "", name)
def op_list_to_dict(op_list, convert_variable_to_tensor=True):
"""Create a dictionary of names to operation lists.
Args:
op_list: A list, tuple, or set of Variables or SaveableObjects.
convert_variable_to_tensor: Whether or not to convert single Variables
with no slice info into Tensors.
Returns:
A dictionary of names to the operations that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of op_list or its elements is not supported.
ValueError: If at least two saveables share the same name.
"""
if not isinstance(op_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % op_list)
# When ResourceVariables are converted to Tensors, read ops are added to the
# graph. Sorting the op_list ensures that the resulting graph is always
# constructed in a deterministic way:
op_list = sorted(op_list, key=lambda x: x.name)
names_to_saveables = {}
# pylint: disable=protected-access
for var in op_list:
resource_or_ref_variable = (
isinstance(var, resource_variable_ops.BaseResourceVariable) or
isinstance(var, variables.RefVariable))
if isinstance(var, saveable_object.SaveableObject):
names_to_saveables[var.name] = var
elif isinstance(var, variables.PartitionedVariable):
if var.name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
var.name)
names_to_saveables[var.name] = var
elif isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.full_name
if name in names_to_saveables:
if not isinstance(names_to_saveables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_saveables[name].append(var)
else:
names_to_saveables[name] = [var]
elif isinstance(var, trackable.Trackable) and not resource_or_ref_variable:
trackable_saveables = [
(factory() if callable(factory) else factory)
for factory in var._gather_saveables_for_checkpoint().values()]
names_to_saveables.update(
op_list_to_dict(trackable_saveables))
else:
# Variables (reference and resource) have an _in_graph_mode property
# indicating whether they were created in a graph building context. We
# also get Tensors when graph building, which do not have this property.
if not getattr(var, "_in_graph_mode", True):
if not isinstance(var, resource_variable_ops.BaseResourceVariable):
raise ValueError(
"Can only save/restore ResourceVariables when eager execution "
"is enabled, type: %s." % type(var))
set_var = names_to_saveables.setdefault(var._shared_name, var)
if set_var is not var:
raise ValueError(
("Two different ResourceVariable objects with the same "
"shared_name '%s' were passed to the Saver. This likely means "
"that they were created in different Graphs or isolation "
"contexts, and may not be checkpointed together.") %
(var._shared_name,))
else:
if convert_variable_to_tensor:
if isinstance(var, resource_variable_ops.BaseResourceVariable):
var = var._graph_element # pylint: disable=protected-access
else:
var = ops.internal_convert_to_tensor(var, as_ref=True)
if not _tensor_comes_from_variable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
if var.op.type == "ReadVariableOp":
name = var.op.inputs[0].op.name
else:
name = var.op.name
if name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_saveables[name] = var
# pylint: enable=protected-access
return names_to_saveables
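# Hedged sketch (not part of the original file): typical output shape of
# op_list_to_dict for two graph-mode variables named "v1" and "v2". The keys
# come from the variable ops (or from the op feeding a ReadVariableOp).
def _example_op_list_to_dict(v1, v2):
  return op_list_to_dict([v1, v2])
  # ==> {"v1": <Tensor read from v1>, "v2": <Tensor read from v2>}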
def _add_saveable(saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def validate_and_slice_inputs(names_to_saveables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_saveables: A dict (k, v) where k is the name of an operation and
v is an operation to save or a BaseSaverBuilder.Saver.
Returns:
A list of SaveableObjects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable or a trackable operation.
ValueError: If the same operation is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_saveables, dict):
names_to_saveables = op_list_to_dict(names_to_saveables)
saveables = []
seen_ops = set()
for name, op in sorted(names_to_saveables.items(),
# Avoid comparing ops, sort only by name.
key=lambda x: x[0]):
for converted_saveable_object in saveable_objects_for_op(op, name):
_add_saveable(saveables, seen_ops, converted_saveable_object)
return saveables
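# Hedged usage sketch (not part of the original file): converting a plain
# list of variables into SaveableObjects, as a Saver builder would. (Passing
# the list directly to validate_and_slice_inputs also works; the two-step
# form just makes the intermediate name mapping visible.)
def _example_saveables_from_variables(list_of_variables):
  names_to_saveables = op_list_to_dict(list_of_variables)
  return validate_and_slice_inputs(names_to_saveables)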
|
tensorflow-master
|
tensorflow/python/training/saving/saveable_object_util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.Module`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import itertools
from absl.testing import parameterized
import six
from tensorflow.python import tf2
from tensorflow.python.distribute import values as distributed_values
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.module import module
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class TestModuleNaming(test_util.TensorFlowTestCase):
def test_single_name(self):
mod = module.Module(name="simple")
self.assertEqual(mod.name, "simple")
self.assertEqual(mod.name_scope.name, "simple/")
def test_construct_in_scope(self):
with ops.name_scope("foo"):
mod = module.Module(name="bar")
self.assertEqual(mod.name, "bar")
self.assertEqual(mod.name_scope.name, "foo/bar/")
def test_enters_name_scope_in_call(self):
mod = ReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod(), mod.name_scope.name)
def test_enters_name_scope_in_other_method(self):
mod = ReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
def test_subclassed_module(self):
mod = SubclassedReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
self.assertEqual(mod.alternative_alternative_forward(),
mod.name_scope.name)
def test_submodule_created_late(self):
m = TreeModule()
self.assertEqual(m.name, "tree_module")
self.assertEqual(m.name_scope.name, "tree_module/")
leaf1 = m.new_leaf()
self.assertEqual(leaf1.name, "tree_module")
self.assertEqual(leaf1.name_scope.name, "tree_module/tree_module/")
def test_does_not_evaluate_property_methods(self):
mod = PropertyThrowsWhenCalledModule()
with self.assertRaises(AssertionError):
mod.raise_assertion_error # pylint: disable=pointless-statement
def test_overridden_name_scope(self):
mod = ModuleOverridingNameScope()
self.assertEqual(mod(), mod.name_scope.name)
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
def test_patched_callable(self):
with ops.name_scope("foo"):
mod = module.Module(name="bar")
mod.foo = get_name_scope
# `foo` is not a method so we do not re-enter the name scope.
self.assertEqual(mod.foo(), "")
def test_property(self):
mod = PropertyModule()
mod.some_property = None, None # None, None for the linter.
getter_scope_name, setter_scope_name = mod.some_property
self.assertEqual(getter_scope_name, "property_module/")
self.assertEqual(setter_scope_name, "property_module/")
def test_property_no_name_scope(self):
mod = PropertyModule()
mod.no_name_scope_property = None, None # None, None for the linter.
getter_scope_name, setter_scope_name = mod.no_name_scope_property
self.assertEqual(getter_scope_name, "")
self.assertEqual(setter_scope_name, "")
def test_invalid_name(self):
msg = ".* is not a valid module name"
with self.assertRaisesRegexp(ValueError, msg):
module.Module(name="$Foo")
@test_util.run_in_graph_and_eager_modes
def test_modules_not_numbered_in_eager(self):
if not context.executing_eagerly():
self.skipTest("Eager specific")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
@test_util.run_in_graph_and_eager_modes
def test_module_numbering_in_graph(self):
if context.executing_eagerly():
self.skipTest("Graph specific")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger_1/")
self.assertEqual(mod.child.name_scope.name, "badger_1/badger/")
def test_ctor_error_closes_name_scope(self):
with self.assertRaises(ErrorModuleError):
      # If the super constructor is called, a name scope is opened and then an
      # error is thrown. The metaclass should handle this and close the name
      # scope before re-throwing the exception.
ErrorModule(call_super=True)
self.assertEqual("", get_name_scope())
def test_ctor_error_handles_ctor_not_opening_name_scope(self):
with self.assertRaises(ErrorModuleError):
      # If the super constructor is not called then the name scope is never
      # opened. We need to ensure that this doesn't trigger an exception (e.g.
      # the metaclass trying to __exit__ a non-existent name scope).
ErrorModule(call_super=False)
self.assertEqual("", get_name_scope())
def test_forward_method_closes_name_scope(self):
mod = ErrorModule(call_super=True, raise_in_constructor=False)
with self.assertRaises(ErrorModuleError):
mod()
self.assertEqual("", get_name_scope())
def test_get_attr_doesnt_enter_name_scope(self):
scope_names = []
class GetAttrModule(module.Module):
def __getattr__(self, name):
scope_names.append((name, get_name_scope()))
return super(GetAttrModule, self).__getattr__(name)
mod = GetAttrModule()
with self.assertRaises(AttributeError):
mod.does_not_exist # pylint: disable=pointless-statement
self.assertIn(("does_not_exist", ""), scope_names)
def test_get_attribute_doesnt_enter_name_scope(self):
scope_names = []
class GetAttributeModule(module.Module):
def __getattribute__(self, name):
scope_names.append((name, get_name_scope()))
return super(GetAttributeModule, self).__getattribute__(name)
mod = GetAttributeModule()
with self.assertRaises(AttributeError):
mod.does_not_exist # pylint: disable=pointless-statement
self.assertIn(("does_not_exist", ""), scope_names)
class VariableNamingTest(test_util.TensorFlowTestCase):
def test_variable_names(self):
mod = RecursiveModule(3)
self.assertEqual(mod.w.name, "badger/mushroom:0")
self.assertEqual(mod.child.w.name, "badger/badger/mushroom:0")
self.assertEqual(mod.child.child.w.name, "badger/badger/badger/mushroom:0")
class NameScopeTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def test_not_memoized_in_tf1(self):
if tf2.enabled():
self.skipTest("Requires TF1")
mod = module.Module(name="name")
name_scope_1 = mod.name_scope
name_scope_2 = mod.name_scope
self.assertIsNot(name_scope_1, name_scope_2)
self.assertEqual(name_scope_1.name, name_scope_2.name)
def test_memoized_in_tf2(self):
if not tf2.enabled():
self.skipTest("Requires TF2")
mod = module.Module(name="name")
name_scope_1 = mod.name_scope
name_scope_2 = mod.name_scope
self.assertIs(name_scope_1, name_scope_2)
class VariableTrackingTest(test_util.TensorFlowTestCase):
def test_variables(self):
m = RecursiveModule(3)
self.assertEqual(m.variables, (m.w, m.child.w, m.child.child.w))
self.assertEqual(m.child.variables, (m.child.w, m.child.child.w))
self.assertEqual(m.child.child.variables, (m.child.child.w,))
def test_trainable_variables(self):
m = RecursiveModule(3)
self.assertEqual(m.trainable_variables,
(m.w, m.child.w, m.child.child.w))
self.assertEqual(m.child.trainable_variables,
(m.child.w, m.child.child.w))
self.assertEqual(m.child.child.trainable_variables, (m.child.child.w,))
def test_trainable_variables_ignores_non_trainable(self):
m = RecursiveModule(3, trainable=False)
self.assertEqual(len(m.trainable_variables), 0)
self.assertEqual(len(m.child.trainable_variables), 0)
self.assertEqual(len(m.child.child.trainable_variables), 0)
def test_supports_distributed_variables(self):
device_map = distributed_values.SingleDeviceMap("/CPU:0")
mirrored = distributed_values.MirroredVariable(
None, device_map, [variables.Variable(1.)],
variables.VariableAggregation.SUM)
tpu = distributed_values.TPUMirroredVariable(
strategy=None,
device_map=device_map,
values=[variables.Variable(42.)],
aggregation=None)
aggregating = distributed_values.AggregatingVariable(
strategy=None, v=variables.Variable(1.), aggregation=None)
m = module.Module()
m.a = mirrored
m.b = tpu
m.c = aggregating
self.assertEqual(m.variables, (mirrored, tpu, aggregating))
class ModuleTrackingTest(test_util.TensorFlowTestCase):
def test_submodules(self):
m = RecursiveModule(3)
self.assertEqual(list(m.submodules), [m.child, m.child.child])
self.assertEqual(list(m.child.submodules), [m.child.child])
self.assertEqual(list(m.child.child.submodules), [])
def test_non_ctor_submodule(self):
m = TreeModule()
leaf1 = m.new_leaf()
self.assertEqual(set(m.submodules), {leaf1})
leaf2 = m.new_leaf()
self.assertEqual(set(m.submodules), {leaf1, leaf2})
class ForwardMethodsTest(test_util.TensorFlowTestCase):
def testFunctionType(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertTrue(isinstance(mod.forward, def_function.Function))
self.assertTrue(isinstance(mod.forward_ag, def_function.Function))
def testEntersNameScope_call(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertEqual(self.evaluate(mod.forward()),
b"module_with_function_annotated_call/")
self.assertEqual(self.evaluate(mod.forward_ag()),
b"module_with_function_annotated_call/")
def testEntersNameScope_concreteFunction(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertEqual(self.evaluate(mod.forward.get_concrete_function()()),
b"module_with_function_annotated_call/")
self.assertEqual(self.evaluate(mod.forward_ag.get_concrete_function()()),
b"module_with_function_annotated_call/")
class AbcTest(test_util.TensorFlowTestCase):
def testAbstract(self):
msg = "Can't instantiate .* abstract methods"
with self.assertRaisesRegexp(TypeError, msg):
AbstractModule() # pylint: disable=abstract-class-instantiated
def testConcrete(self):
mod = ConcreteModule()
x, scope_name = mod(2.)
self.assertEqual(x, 4.)
self.assertEqual(scope_name, "concrete_module/")
self.assertEqual(get_name_scope(), "")
def get_name_scope():
with ops.name_scope("x") as ns:
ns = "/".join(ns.split("/")[:-2])
return ns + "/" if ns else ""
class ErrorModuleError(Exception):
pass
class ErrorModule(module.Module):
def __init__(self, call_super, raise_in_constructor=True):
if call_super:
super(ErrorModule, self).__init__()
if raise_in_constructor:
raise ErrorModuleError("Deliberate error!")
def __call__(self):
raise ErrorModuleError("Deliberate error!")
class RecursiveModule(module.Module):
def __init__(self, depth, trainable=True):
super(RecursiveModule, self).__init__(name="badger")
with self.name_scope:
self.child = None
if depth > 1:
self.child = RecursiveModule(depth - 1, trainable=trainable)
self.w = variables.Variable(1.0, trainable=trainable, name="mushroom")
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(module.Module):
@abc.abstractmethod
def __call__(self, x):
pass
class ConcreteModule(AbstractModule):
@module.Module.with_name_scope
def __call__(self, x):
return x ** 2, get_name_scope()
class TreeModule(module.Module):
def __init__(self, name=None):
super(TreeModule, self).__init__(name=name)
self._leaves = []
@module.Module.with_name_scope
def new_leaf(self, name=None):
leaf = TreeModule(name=name)
self._leaves.append(leaf)
return leaf
class ReturnsNameScopeModule(module.Module):
@module.Module.with_name_scope
def alternative_forward(self):
return get_name_scope()
@module.Module.with_name_scope
def __call__(self):
return get_name_scope()
class SubclassedReturnsNameScopeModule(ReturnsNameScopeModule):
@module.Module.with_name_scope
def alternative_alternative_forward(self):
return get_name_scope()
class PropertyThrowsWhenCalledModule(module.Module):
@property
def raise_assertion_error(self):
raise AssertionError
class ModuleOverridingNameScope(ReturnsNameScopeModule):
@property
def name_scope(self):
return ops.name_scope("yolo/")
class ModuleWithFunctionAnnotatedCall(module.Module):
@def_function.function(autograph=False)
@module.Module.with_name_scope
def forward(self):
return get_name_scope()
@def_function.function(autograph=True)
@module.Module.with_name_scope
def forward_ag(self):
return get_name_scope()
class PropertyModule(module.Module):
def __init__(self):
super(PropertyModule, self).__init__()
self._setter_scope_name = None
@property
@module.Module.with_name_scope
def some_property(self):
getter_scope_name = get_name_scope()
return getter_scope_name, self._setter_scope_name
@some_property.setter
@module.Module.with_name_scope
def some_property(self, my_property):
self._setter_scope_name = get_name_scope()
@property
def no_name_scope_property(self):
getter_scope_name = get_name_scope()
return getter_scope_name, self._setter_scope_name
@no_name_scope_property.setter
def no_name_scope_property(self, my_property):
self._setter_scope_name = get_name_scope()
NamedPair = collections.namedtuple("NamedPair", ("first", "second"))
mk_index_dict = lambda v: dict(enumerate(v))
class FlattenTest(parameterized.TestCase, test_util.TensorFlowTestCase):
@parameterized.parameters(lambda v: NamedPair(*v), list, tuple, mk_index_dict)
def test_flatten(self, container_type):
parent = SimpleModule(container_type=container_type)
child = parent.c
self.assertEqual(
list(parent._flatten(recursive=False, predicate=is_member)),
[parent.a[0], parent.a[1], parent.z])
self.assertEqual(
list(parent._flatten(predicate=is_member)),
[parent.a[0], parent.a[1], parent.z, child.a[0], child.a[1], child.z])
def test_attribute_traversal_key(self):
mod = LayerModule()
self.assertEqual(
mod.variables,
mod._trainable_variables + mod._non_trainable_variables + [mod._bonus])
def test_attributes_to_ignore(self):
class DangerousModule(module.Module):
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
("dangerous_submodule", "dangerous_variable"),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
mod = DangerousModule()
mod.dangerous_submodule = module.Module()
mod.dangerous_variable = variables.Variable(1.)
mod.normal_variable = variables.Variable(2.)
self.assertEmpty(mod.submodules)
self.assertLen(mod.variables, 1)
self.assertEqual(mod.variables[0], mod.normal_variable)
def test_with_path(self):
mod = module.Module()
mod.w = variables.Variable(1.)
mod.encoder = module.Module()
mod.encoder.w = [({"k": mod.w}, {"k": mod.w})]
mod.decoder = mod.encoder
state_dict = dict(
mod._flatten(with_path=True, predicate=module._is_variable))
self.assertEqual(state_dict,
{("w",): mod.w,
("encoder", "w", 0, 0, "k"): mod.encoder.w[0][0]["k"],
("encoder", "w", 0, 1, "k"): mod.encoder.w[0][1]["k"],
("decoder", "w", 0, 0, "k"): mod.decoder.w[0][0]["k"],
("decoder", "w", 0, 1, "k"): mod.decoder.w[0][1]["k"]},)
def test_module_discover_layer_variable(self):
m = module.Module()
m.a = layers.Dense(1)
m.b = layers.Dense(2)
    # The weights of the layers have not been created yet.
self.assertEmpty(m.variables)
self.assertLen(m.submodules, 2)
inputs = layers.Input((1,))
m.a(inputs)
m.b(inputs)
variable_list = m.variables
self.assertLen(variable_list, 4)
self.assertEqual(variable_list[0], m.a.kernel)
self.assertEqual(variable_list[1], m.a.bias)
self.assertEqual(variable_list[2], m.b.kernel)
self.assertEqual(variable_list[3], m.b.bias)
def test_model_discover_submodule(self):
m = models.Sequential(layers=[layers.Dense(1),
layers.Dense(2)])
self.assertEqual(m.submodules, (m.layers[0], m.layers[1]))
m(layers.Input((1,)))
self.assertLen(m.variables, 4)
class LayerModule(module.Module):
def __init__(self):
super(LayerModule, self).__init__()
self._trainable_variables = [
variables.Variable(1., name="a"),
variables.Variable(2., name="b"),
]
self._non_trainable_variables = [
variables.Variable(3., name="c"),
variables.Variable(4., name="d"),
]
self._bonus = variables.Variable(5., name="e")
@property
def variables(self):
def key_function(name):
indexes = {"_trainable_variables": 0, "_non_trainable_variables": 1}
return indexes.get(name, 2), name
return list(
self._flatten(
predicate=module._is_variable,
attribute_traversal_key=key_function))
class MemberType(object):
"""A simple type to search for."""
pass
class SimpleModule(module.Module):
def __init__(self, create_child=True, container_type=list):
super(SimpleModule, self).__init__()
self.z = MemberType()
self.a = container_type([MemberType(), MemberType()])
if create_child:
self.c = SimpleModule(create_child=False)
is_member = lambda v: isinstance(v, MemberType)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/module/module_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Modules encapsulate building stateful components."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
@tf_export("Module")
class Module(tracking.AutoTrackable):
"""Base neural network module class.
A module is a named container for `tf.Variable`s, other `tf.Module`s and
functions which apply to user input. For example a dense layer in a neural
network might be implemented as a `tf.Module`:
```python
class Dense(tf.Module):
  def __init__(self, input_features, output_features, name=None):
super(Dense, self).__init__(name=name)
self.w = tf.Variable(
tf.random.normal([input_features, output_features]), name='w')
self.b = tf.Variable(tf.zeros([output_features]), name='b')
def __call__(self, x):
y = tf.matmul(x, self.w) + self.b
return tf.nn.relu(y)
```
You can use the Dense layer as you would expect:
```python
d = Dense(input_features=64, output_features=10)
d(tf.ones([100, 64]))
#==> <tf.Tensor: ...>
```
By subclassing `tf.Module` instead of `object` any `tf.Variable` or
`tf.Module` instances assigned to object properties can be collected using
the `variables`, `trainable_variables` or `submodules` property:
```python
d.variables
#==> (<tf.Variable 'b:0' ...>, <tf.Variable 'w:0' ...>)
```
Subclasses of `tf.Module` can also take advantage of the `_flatten` method
which can be used to implement tracking of any other types.
All `tf.Module` classes have an associated `tf.name_scope` which can be used
to group operations in TensorBoard and create hierarchies for variable names
which can help with debugging. We suggest using the name scope when creating
nested submodules/parameters or for forward methods whose graph you might want
to inspect in TensorBoard. You can enter the name scope explicitly using
`with self.name_scope:` or you can annotate methods (apart from `__init__`)
with `@tf.Module.with_name_scope`.
```python
class MLP(tf.Module):
def __init__(self, input_size, sizes, name=None):
super(MLP, self).__init__(name=name)
self.layers = []
with self.name_scope:
for size in sizes:
        self.layers.append(Dense(input_features=input_size,
                                 output_features=size))
input_size = size
@tf.Module.with_name_scope
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
```
"""
# AutoTrackable adds object attributes that users will not expect us to
# include when flattening (these reference dependencies reachable via other
# object attributes).
_TF_MODULE_IGNORED_PROPERTIES = frozenset((
"_self_unconditional_checkpoint_dependencies",
"_self_unconditional_dependency_names"
))
def __init__(self, name=None):
if name is None:
name = camel_to_snake(type(self).__name__)
else:
if not valid_identifier(name):
raise ValueError(
"%r is not a valid module name. Module names must be valid Python "
"identifiers (e.g. a valid class name)." % name)
self._name = name
if tf2.enabled():
with ops.name_scope_v2(name) as scope_name:
self._name_scope = ops.name_scope_v2(scope_name)
else:
with ops.name_scope(name) as scope_name:
self._scope_name = scope_name
@property
def name(self):
"""Returns the name of this module as passed or determined in the ctor.
NOTE: This is not the same as the `self.name_scope.name` which includes
parent module names.
"""
return self._name
@property
def name_scope(self):
"""Returns a `tf.name_scope` instance for this class."""
if tf2.enabled():
return self._name_scope
else:
# In TF1 name_scope is not re-entrant in eager so we cannot memoize it.
return ops.name_scope(self._scope_name)
@property
def variables(self):
"""Sequence of variables owned by this module and it's submodules.
Note: this method uses reflection to find variables on the current instance
and submodules. For performance reasons you may wish to cache the result
of calling this method if you don't expect the return value to change.
Returns:
A sequence of variables for the current module (sorted by attribute
name) followed by variables from all submodules recursively (breadth
first).
"""
return tuple(self._flatten(predicate=_is_variable))
@property
def trainable_variables(self):
"""Sequence of variables owned by this module and it's submodules.
Note: this method uses reflection to find variables on the current instance
and submodules. For performance reasons you may wish to cache the result
of calling this method if you don't expect the return value to change.
Returns:
A sequence of variables for the current module (sorted by attribute
name) followed by variables from all submodules recursively (breadth
first).
"""
return tuple(self._flatten(predicate=_is_trainable_variable))
@property
def submodules(self):
"""Sequence of all sub-modules.
Submodules are modules which are properties of this module, or found as
properties of modules which are properties of this module (and so on).
```
a = tf.Module()
b = tf.Module()
c = tf.Module()
a.b = b
b.c = c
assert list(a.submodules) == [b, c]
assert list(b.submodules) == [c]
assert list(c.submodules) == []
```
Returns:
A sequence of all submodules.
"""
return tuple(self._flatten(predicate=_is_module))
def _flatten(self,
recursive=True,
predicate=None,
attribute_traversal_key=None,
with_path=False):
"""Flattened attribute values in sorted order by attribute name.
Modules are flattened by first walking their attributes in name order.
    Each attribute value is then flattened to find leaf values. If `recursive`
    is true and a leaf is itself a `Module`, it is flattened in turn. Every
    leaf value is then tested against the given `predicate` and, if it
    matches, yielded.
```
class Foo(tf.Module):
def __init__(self):
super(Foo, self).__init__()
self.x = [tf.constant('a'), tf.constant('b')]
self.y = {'i': tf.constant('c'), 'j': tf.constant('d')}
self.z = tf.constant('e')
@property
def tensors(self):
return tuple(self._flatten(predicate=is_tensor, with_path=True))
foo = Foo()
foo.tensors
# ==> ((('x', 0), <tf.Tensor: ...'a'>),
# (('x', 1), <tf.Tensor: ...'b'>),
# (('y', 'i'), <tf.Tensor: ...'c'>),
# (('y', 'j'), <tf.Tensor: ...'d'>),
# (('z',), <tf.Tensor: ...'e'>))
```
`attribute_traversal_key` controls the order object properties are visited.
If not set objects are visited in ascending order by name.
Args:
recursive: Whether to recurse into child modules or not.
predicate: (Optional) If set then only values matching predicate are
yielded. A value of `None` (the default) means no items will be
filtered.
attribute_traversal_key: (Optional) Method to rekey object attributes
before they are sorted. Contract is the same as `key` argument to
builtin `sorted` and only applies to object properties.
with_path: (Optional) Whether to include the path to the object as well
as the object itself. If `with_path` is `True` then leaves will not be
de-duplicated (e.g. if the same leaf instance is reachable via multiple
modules then it will be yielded multiple times with different paths).
Returns:
Flat generator for leaves of the current module and optionally all
submodules.
"""
if predicate is None:
predicate = lambda _: True
return _flatten_module(
self,
recursive=recursive,
predicate=predicate,
attributes_to_ignore=self._TF_MODULE_IGNORED_PROPERTIES,
attribute_traversal_key=attribute_traversal_key,
with_path=with_path)
@classmethod
def with_name_scope(cls, method):
"""Decorator to automatically enter the module name scope.
```
class MyModule(tf.Module):
@tf.Module.with_name_scope
def __call__(self, x):
if not hasattr(self, 'w'):
self.w = tf.Variable(tf.random.normal([x.shape[1], 64]))
return tf.matmul(x, self.w)
```
Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose
names included the module name:
```
mod = MyModule()
mod(tf.ones([8, 32]))
# ==> <tf.Tensor: ...>
mod.w
# ==> <tf.Variable ...'my_module/w:0'>
```
Args:
method: The method to wrap.
Returns:
The original method wrapped such that it enters the module's name scope.
"""
def method_with_name_scope(self, *args, **kwargs):
with self.name_scope:
return method(self, *args, **kwargs)
return tf_decorator.make_decorator(method, method_with_name_scope)
def _is_variable(obj):
return isinstance(obj, variables.Variable)
def _is_trainable_variable(obj):
return _is_variable(obj) and getattr(obj, "trainable", False)
def _is_module(obj):
return isinstance(obj, Module)
_CAMEL_TO_SNAKE_R = re.compile(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))")
_VALID_IDENTIFIER = re.compile(r"^[a-zA-Z_]([a-zA-Z0-9_])*$")
def valid_identifier(name):
return bool(_VALID_IDENTIFIER.match(name))
def camel_to_snake(value):
return _CAMEL_TO_SNAKE_R.sub(r"_\1", value).lower()
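# Illustrative examples (not part of the original file):
#   camel_to_snake("MyModule")   # ==> "my_module"
#   camel_to_snake("MLP")        # ==> "mlp"
#   valid_identifier("dense_1")  # ==> True
#   valid_identifier("$Foo")     # ==> False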
def _flatten_module(module,
recursive,
predicate,
attribute_traversal_key,
attributes_to_ignore,
with_path,
module_path=(),
seen=None):
"""Implementation of `flatten`."""
if seen is None:
seen = set([id(module)])
module_dict = vars(module)
submodules = []
for key in sorted(module_dict, key=attribute_traversal_key):
if key in attributes_to_ignore:
continue
for leaf_path, leaf in nest.flatten_with_tuple_paths(module_dict[key]):
leaf_path = (key,) + leaf_path
# TODO(tomhennigan) Handle cycles for `with_path=True` (e.g. `a.a = a`).
if not with_path:
leaf_id = id(leaf)
if leaf_id in seen:
continue
seen.add(leaf_id)
if predicate(leaf):
if with_path:
yield module_path + leaf_path, leaf
else:
yield leaf
if recursive and _is_module(leaf):
# Walk direct properties first then recurse.
submodules.append((module_path + leaf_path, leaf))
for submodule_path, submodule in submodules:
subvalues = _flatten_module(
submodule,
recursive=recursive,
predicate=predicate,
attribute_traversal_key=attribute_traversal_key,
attributes_to_ignore=submodule._TF_MODULE_IGNORED_PROPERTIES,
with_path=with_path,
module_path=submodule_path,
seen=seen)
for subvalue in subvalues:
# Predicate is already tested for these values.
yield subvalue
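# Hedged sketch (not part of the original file): `seen` de-duplicates leaves
# by id, so a variable reachable through two attributes is yielded only once
# when `with_path=False`.
def _example_flatten_dedup():
  m = Module()
  m.a = variables.Variable(1.)
  m.b = m.a  # the same object, reachable via two attributes
  return tuple(m._flatten(predicate=_is_variable))  # one entry, not two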
|
tensorflow-master
|
tensorflow/python/module/module.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import router for file_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.lib.io.file_io import copy as Copy
from tensorflow.python.lib.io.file_io import create_dir as MkDir
from tensorflow.python.lib.io.file_io import delete_file as Remove
from tensorflow.python.lib.io.file_io import delete_recursively as DeleteRecursively
from tensorflow.python.lib.io.file_io import file_exists as Exists
from tensorflow.python.lib.io.file_io import FileIO as _FileIO
from tensorflow.python.lib.io.file_io import get_matching_files as Glob
from tensorflow.python.lib.io.file_io import is_directory as IsDirectory
from tensorflow.python.lib.io.file_io import list_directory as ListDirectory
from tensorflow.python.lib.io.file_io import recursive_create_dir as MakeDirs
from tensorflow.python.lib.io.file_io import rename as Rename
from tensorflow.python.lib.io.file_io import stat as Stat
from tensorflow.python.lib.io.file_io import walk as Walk
# pylint: enable=unused-import
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
@tf_export('io.gfile.GFile', v1=['gfile.GFile', 'gfile.Open', 'io.gfile.GFile'])
class GFile(_FileIO):
"""File I/O wrappers without thread locking.
  Note that this is somewhat like builtin Python file I/O, but
there are semantic differences to make it more efficient for
some backing filesystems. For example, a write mode file will
not be opened until the first write call (to minimize RPC
invocations in network filesystems).
"""
def __init__(self, name, mode='r'):
super(GFile, self).__init__(name=name, mode=mode)
@tf_export(v1=['gfile.FastGFile'])
class FastGFile(_FileIO):
"""File I/O wrappers without thread locking.
  Note that this is somewhat like builtin Python file I/O, but
there are semantic differences to make it more efficient for
some backing filesystems. For example, a write mode file will
not be opened until the first write call (to minimize RPC
invocations in network filesystems).
"""
@deprecated(None, 'Use tf.gfile.GFile.')
def __init__(self, name, mode='r'):
super(FastGFile, self).__init__(name=name, mode=mode)
# Does not alias to Open so that we use our version of GFile to strip
# 'b' mode.
Open = GFile
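# Hedged usage sketch (not part of the original file); the path is
# hypothetical. FileIO supports the context-manager protocol, and in write
# mode the file is only opened on the first write call, as described above.
def _example_gfile_roundtrip():
  with GFile('/tmp/example.txt', 'w') as f:
    f.write('hello')
  with GFile('/tmp/example.txt', 'r') as f:
    return f.read()  # ==> 'hello'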
|
tensorflow-master
|
tensorflow/python/platform/gfile.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test to make sure stack trace is generated in case of test failures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import signal
import subprocess
import sys
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
# FLAGS defined at the bottom:
# child (bool) set to true if we are running in the child process.
FLAGS = None
_CHILD_FLAG_HELP = 'Boolean. Set to true if this is the child process.'
class StacktraceHandlerTest(test.TestCase):
def testChildProcessKillsItself(self):
if FLAGS.child:
os.kill(os.getpid(), signal.SIGABRT)
def testGeneratesStacktrace(self):
if FLAGS.child:
return
# Subprocess sys.argv[0] with --child=True
if sys.executable:
child_process = subprocess.Popen(
[sys.executable, sys.argv[0], '--child=True'], cwd=os.getcwd(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
child_process = subprocess.Popen(
[sys.argv[0], '--child=True'], cwd=os.getcwd(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Capture its output: both stdout and stderr, appended together.
    # We are not worried about timing or order of messages in this test.
child_stdout, child_stderr = child_process.communicate()
child_output = child_stdout + child_stderr
# Make sure the child process is dead before we proceed.
child_process.wait()
logging.info('Output from the child process:')
logging.info(child_output)
# Verify a stack trace is printed.
self.assertIn(b'PyEval_EvalFrame', child_output)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--child', type=bool, default=False, help=_CHILD_FLAG_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
tensorflow-master
|
tensorflow/python/platform/stacktrace_handler_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A no-op implementation of status bar functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def SetupStatusBarInsideGoogle(unused_link_text, unused_port):
pass
|
tensorflow-master
|
tensorflow/python/platform/status_bar.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the generated build_info script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import build_info
from tensorflow.python.platform import test
class BuildInfoTest(test.TestCase):
def testBuildInfo(self):
self.assertEqual(build_info.is_cuda_build, test.is_built_with_cuda())
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/platform/build_info_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging utilities."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import os as _os
import sys as _sys
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
import threading
import six
from tensorflow.python.util.tf_export import tf_export
# Don't use this directly. Use get_logger() instead.
_logger = None
_logger_lock = threading.Lock()
def _get_caller(offset=3):
"""Returns a code and frame object for the lowest non-logging stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe(offset)
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return code, f
f = f.f_back
return None, None
# The definition of `findCaller` changed in Python 3.2
if _sys.version_info >= (3, 2):
def _logger_find_caller(stack_info=False): # pylint: disable=g-wrong-blank-lines
code, frame = _get_caller(4)
sinfo = None
if stack_info:
sinfo = '\n'.join(_traceback.format_stack())
if code:
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
else:
return '(unknown file)', 0, '(unknown function)', sinfo
else:
def _logger_find_caller(): # pylint: disable=g-wrong-blank-lines
code, frame = _get_caller(4)
if code:
return (code.co_filename, frame.f_lineno, code.co_name)
else:
return '(unknown file)', 0, '(unknown function)'
@tf_export('get_logger')
def get_logger():
"""Return TF logger instance."""
global _logger
# Use double-checked locking to avoid taking lock unnecessarily.
if _logger:
return _logger
_logger_lock.acquire()
try:
if _logger:
return _logger
# Scope the TensorFlow logger to not conflict with users' loggers.
logger = _logging.getLogger('tensorflow')
# Override findCaller on the logger to skip internal helper functions
logger.findCaller = _logger_find_caller
# Don't further configure the TensorFlow logger if the root logger is
# already configured. This prevents double logging in those cases.
if not _logging.getLogger().handlers:
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells.
if _sys.ps1: _interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# If we are in an interactive environment (like Jupyter), set loglevel
# to INFO and pipe the output to stdout.
if _interactive:
logger.setLevel(INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler.
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
logger.addHandler(_handler)
_logger = logger
return _logger
finally:
_logger_lock.release()
@tf_export(v1=['logging.log'])
def log(level, msg, *args, **kwargs):
get_logger().log(level, msg, *args, **kwargs)
@tf_export(v1=['logging.debug'])
def debug(msg, *args, **kwargs):
get_logger().debug(msg, *args, **kwargs)
@tf_export(v1=['logging.error'])
def error(msg, *args, **kwargs):
get_logger().error(msg, *args, **kwargs)
@tf_export(v1=['logging.fatal'])
def fatal(msg, *args, **kwargs):
get_logger().fatal(msg, *args, **kwargs)
@tf_export(v1=['logging.info'])
def info(msg, *args, **kwargs):
get_logger().info(msg, *args, **kwargs)
@tf_export(v1=['logging.warn'])
def warn(msg, *args, **kwargs):
get_logger().warning(msg, *args, **kwargs)
@tf_export(v1=['logging.warning'])
def warning(msg, *args, **kwargs):
get_logger().warning(msg, *args, **kwargs)
_level_names = {
FATAL: 'FATAL',
ERROR: 'ERROR',
WARN: 'WARN',
INFO: 'INFO',
DEBUG: 'DEBUG',
}
# Mask to convert integer thread ids to unsigned quantities for logging
# purposes
_THREAD_ID_MASK = 2 * _sys.maxsize + 1
_log_prefix = None # later set to google2_log_prefix
# Counter to keep track of number of log entries per token.
_log_counter_per_token = {}
@tf_export(v1=['logging.TaskLevelStatusMessage'])
def TaskLevelStatusMessage(msg):
error(msg)
@tf_export(v1=['logging.flush'])
def flush():
raise NotImplementedError()
# Code below is taken from pyglib/logging
@tf_export(v1=['logging.vlog'])
def vlog(level, msg, *args, **kwargs):
get_logger().log(level, msg, *args, **kwargs)
def _GetNextLogCountPerToken(token):
"""Wrapper for _log_counter_per_token.
Args:
token: The token for which to look up the count.
Returns:
The number of times this function has been called with
*token* as an argument (starting at 0)
"""
global _log_counter_per_token # pylint: disable=global-variable-not-assigned
_log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
return _log_counter_per_token[token]
@tf_export(v1=['logging.log_every_n'])
def log_every_n(level, msg, n, *args):
"""Log 'msg % args' at level 'level' once per 'n' times.
Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
    n: The logging period; the message is logged once every n calls.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, not (count % n), *args)
@tf_export(v1=['logging.log_first_n'])
def log_first_n(level, msg, n, *args): # pylint: disable=g-bad-name
"""Log 'msg % args' at level 'level' only first 'n' times.
Not threadsafe.
Args:
level: The level at which to log.
msg: The message to be logged.
    n: The maximum number of times the message should be logged.
*args: The args to be substituted into the msg.
"""
count = _GetNextLogCountPerToken(_GetFileAndLine())
log_if(level, msg, count < n, *args)
@tf_export(v1=['logging.log_if'])
def log_if(level, msg, condition, *args):
"""Log 'msg % args' at level 'level' only if condition is fulfilled."""
if condition:
vlog(level, msg, *args)
def _GetFileAndLine():
"""Returns (filename, linenumber) for the stack frame."""
code, f = _get_caller()
if not code:
return ('<unknown>', 0)
return (code.co_filename, f.f_lineno)
def google2_log_prefix(level, timestamp=None, file_and_line=None):
"""Assemble a logline prefix using the google2 format."""
# pylint: disable=global-variable-not-assigned
global _level_names
# pylint: enable=global-variable-not-assigned
# Record current time
now = timestamp or _time.time()
now_tuple = _time.localtime(now)
now_microsecond = int(1e6 * (now % 1.0))
(filename, line) = file_and_line or _GetFileAndLine()
basename = _os.path.basename(filename)
# Severity string
severity = 'I'
if level in _level_names:
severity = _level_names[level][0]
s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (
severity,
now_tuple[1], # month
now_tuple[2], # day
now_tuple[3], # hour
now_tuple[4], # min
now_tuple[5], # sec
now_microsecond,
_get_thread_id(),
basename,
line)
return s
@tf_export(v1=['logging.get_verbosity'])
def get_verbosity():
"""Return how much logging output will be produced."""
return get_logger().getEffectiveLevel()
@tf_export(v1=['logging.set_verbosity'])
def set_verbosity(v):
"""Sets the threshold for what messages will be logged."""
get_logger().setLevel(v)
def _get_thread_id():
"""Get id of current thread, suitable for logging as an unsigned quantity."""
# pylint: disable=protected-access
thread_id = six.moves._thread.get_ident()
# pylint:enable=protected-access
return thread_id & _THREAD_ID_MASK
_log_prefix = google2_log_prefix
tf_export(v1=['logging.DEBUG']).export_constant(__name__, 'DEBUG')
tf_export(v1=['logging.ERROR']).export_constant(__name__, 'ERROR')
tf_export(v1=['logging.FATAL']).export_constant(__name__, 'FATAL')
tf_export(v1=['logging.INFO']).export_constant(__name__, 'INFO')
tf_export(v1=['logging.WARN']).export_constant(__name__, 'WARN')
|
tensorflow-master
|
tensorflow/python/platform/tf_logging.py
|
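A quick usage sketch for the logging utilities above, assuming they are reachable as `tf.compat.v1.logging` (as suggested by the `tf_export` decorators); the loop bounds are illustrative only.

import tensorflow.compat.v1 as tf

tf.logging.set_verbosity(tf.logging.INFO)
tf.logging.info('loss at step %d: %.4f', 100, 0.25)

# The rate-limited helpers keep a per-call-site counter, so each call site
# below is throttled independently.
for i in range(10):
  tf.logging.log_every_n(tf.logging.INFO, 'iteration %d', 3, i)  # i = 0, 3, 6, 9
  tf.logging.log_first_n(tf.logging.WARN, 'startup message %d', 2, i)  # i = 0, 1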
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import os
import re
import sys
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import timeline
from tensorflow.python.framework import ops
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# When a subclass of the Benchmark class is created, it is added to
# the registry automatically
GLOBAL_BENCHMARK_REGISTRY = set()
# Environment variable that determines whether benchmarks are written.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
# Environment variable that lets the TensorFlow runtime allocate a new
# threadpool for each benchmark.
OVERRIDE_GLOBAL_THREADPOOL = "TF_OVERRIDE_GLOBAL_THREADPOOL"
def _global_report_benchmark(
name, iters=None, cpu_time=None, wall_time=None,
throughput=None, extras=None, metrics=None):
"""Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
metrics: (optional) A list of dict representing metrics generated by the
      benchmark. Each dict should contain keys 'name' and 'value'. A dict
can optionally contain keys 'min_value' and 'max_value'.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
"""
logging.info("Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
"throughput: %g, extras: %s, metrics: %s", name,
iters if iters is not None else -1,
wall_time if wall_time is not None else -1,
cpu_time if cpu_time is not None else -1,
throughput if throughput is not None else -1,
str(extras) if extras else "None",
str(metrics) if metrics else "None")
entries = test_log_pb2.BenchmarkEntries()
entry = entries.entry.add()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
for (k, v) in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
if metrics is not None:
if not isinstance(metrics, list):
raise TypeError("metrics must be a list")
for metric in metrics:
if "name" not in metric:
raise TypeError("metric must has a 'name' field")
if "value" not in metric:
raise TypeError("metric must has a 'value' field")
metric_entry = entry.metrics.add()
metric_entry.name = metric["name"]
metric_entry.value = metric["value"]
if "min_value" in metric:
metric_entry.min_value.value = metric["min_value"]
if "max_value" in metric:
metric_entry.max_value.value = metric["max_value"]
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
# Reporting was not requested, just print the proto
print(str(entries))
return
serialized_entry = entries.SerializeToString()
mangled_name = name.replace("/", "__")
output_path = "%s%s" % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError("File already exists: %s" % output_path)
with gfile.GFile(output_path, "wb") as out:
out.write(serialized_entry)
class _BenchmarkRegistrar(type):
"""The Benchmark class registrar. Used by abstract Benchmark class."""
  def __new__(mcs, clsname, base, attrs):
    newclass = super(_BenchmarkRegistrar, mcs).__new__(
        mcs, clsname, base, attrs)
if not newclass.is_abstract():
GLOBAL_BENCHMARK_REGISTRY.add(newclass)
return newclass
class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
"""Abstract class that provides helper functions for running benchmarks.
Any class subclassing this one is immediately registered in the global
benchmark registry.
Only methods whose names start with the word "benchmark" will be run during
benchmarking.
"""
@classmethod
def is_abstract(cls):
# mro: (_BenchmarkRegistrar, Benchmark) means this is Benchmark
return len(cls.mro()) <= 2
def _get_name(self, overwrite_name=None):
"""Returns full name of class and method calling report_benchmark."""
# Find the caller method (outermost Benchmark class)
stack = tf_inspect.stack()
calling_class = None
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, Benchmark):
calling_class = f_self # Get the outermost stack Benchmark call
name = frame[3] # Get the method name
break
if calling_class is None:
raise ValueError("Unable to determine calling Benchmark class.")
    # Use the method name, or overwrite_name if provided.
name = overwrite_name or name
# Prefix the name with the class name.
class_name = type(calling_class).__name__
name = "%s.%s" % (class_name, name)
return name
def report_benchmark(
self,
iters=None,
cpu_time=None,
wall_time=None,
throughput=None,
extras=None,
name=None,
metrics=None):
"""Report a benchmark.
Args:
iters: (optional) How many iterations were run
cpu_time: (optional) Median or mean cpu time in seconds.
wall_time: (optional) Median or mean wall time in seconds.
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Values may be either floats or values that are convertible to strings.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the top-level method name.
metrics: (optional) A list of dict, where each dict has the keys below
name (required), string, metric name
value (required), double, metric value
min_value (optional), double, minimum acceptable metric value
max_value (optional), double, maximum acceptable metric value
"""
name = self._get_name(overwrite_name=name)
_global_report_benchmark(
name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
throughput=throughput, extras=extras, metrics=metrics)
@tf_export("test.benchmark_config")
def benchmark_config():
"""Returns a tf.compat.v1.ConfigProto for disabling the dependency optimizer.
Returns:
A TensorFlow ConfigProto object.
"""
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
@tf_export("test.Benchmark")
class TensorFlowBenchmark(Benchmark):
"""Abstract class that provides helpers for TensorFlow benchmarks."""
def __init__(self):
# Allow TensorFlow runtime to allocate a new threadpool with different
# number of threads for each new benchmark.
os.environ[OVERRIDE_GLOBAL_THREADPOOL] = "1"
super(TensorFlowBenchmark, self).__init__()
@classmethod
def is_abstract(cls):
# mro: (_BenchmarkRegistrar, Benchmark, TensorFlowBenchmark) means
# this is TensorFlowBenchmark.
return len(cls.mro()) <= 3
def run_op_benchmark(self,
sess,
op_or_tensor,
feed_dict=None,
burn_iters=2,
min_iters=10,
store_trace=False,
store_memory_usage=True,
name=None,
extras=None,
mbs=0):
"""Run an op or tensor in the given session. Report the results.
Args:
sess: `Session` object to use for timing.
op_or_tensor: `Operation` or `Tensor` to benchmark.
feed_dict: A `dict` of values to feed for each op iteration (see the
`feed_dict` parameter of `Session.run`).
burn_iters: Number of burn-in iterations to run.
min_iters: Minimum number of iterations to use for timing.
      store_trace: Boolean, whether to run an extra untimed iteration and
        store the trace of that iteration in the returned extras.
        The trace will be stored as a string in Google Chrome trace format
        in the extras field "full_trace_chrome_format". Note that the trace
        will not be stored in the test_log_pb2.TestResults proto.
store_memory_usage: Boolean, whether to run an extra untimed iteration,
calculate memory usage, and store that in extras fields.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the top-level method name.
extras: (optional) Dict mapping string keys to additional benchmark info.
Values may be either floats or values that are convertible to strings.
      mbs: (optional) The number of megabytes moved by this op, used to
        calculate the op's throughput.
Returns:
A `dict` containing the key-value pairs that were passed to
      `report_benchmark`. If the `store_trace` option is used, then
      `full_trace_chrome_format` will be included in the returned dictionary
      even though it is not passed to `report_benchmark` with `extras`.
"""
for _ in range(burn_iters):
sess.run(op_or_tensor, feed_dict=feed_dict)
deltas = [None] * min_iters
for i in range(min_iters):
start_time = time.time()
sess.run(op_or_tensor, feed_dict=feed_dict)
end_time = time.time()
delta = end_time - start_time
deltas[i] = delta
extras = extras if extras is not None else {}
unreported_extras = {}
if store_trace or store_memory_usage:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(op_or_tensor, feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
tl = timeline.Timeline(run_metadata.step_stats)
if store_trace:
unreported_extras["full_trace_chrome_format"] = (
tl.generate_chrome_trace_format())
if store_memory_usage:
step_stats_analysis = tl.analyze_step_stats(show_memory=True)
allocator_maximums = step_stats_analysis.allocator_maximums
for k, v in allocator_maximums.items():
extras["allocator_maximum_num_bytes_%s" % k] = v.num_bytes
def _median(x):
if not x:
return -1
s = sorted(x)
l = len(x)
lm1 = l - 1
return (s[l//2] + s[lm1//2]) / 2.0
median_delta = _median(deltas)
benchmark_values = {
"iters": min_iters,
"wall_time": median_delta,
"extras": extras,
"name": name,
"throughput": mbs / median_delta
}
self.report_benchmark(**benchmark_values)
benchmark_values["extras"].update(unreported_extras)
return benchmark_values
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
sess = ops.get_default_session() or self.cached_session()
return sess.run(tensors)
def _run_benchmarks(regex):
"""Run benchmarks that match regex `regex`.
This function goes through the global benchmark registry, and matches
benchmark class and method names of the form
`module.name.BenchmarkClass.benchmarkMethod` to the given regex.
If a method matches, it is run.
Args:
regex: The string regular expression to match Benchmark classes against.
"""
registry = list(GLOBAL_BENCHMARK_REGISTRY)
# Match benchmarks in registry against regex
for benchmark in registry:
benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
attrs = dir(benchmark)
# Don't instantiate the benchmark class unless necessary
benchmark_instance = None
for attr in attrs:
if not attr.startswith("benchmark"):
continue
candidate_benchmark_fn = getattr(benchmark, attr)
if not callable(candidate_benchmark_fn):
continue
full_benchmark_name = "%s.%s" % (benchmark_name, attr)
if regex == "all" or re.search(regex, full_benchmark_name):
# Instantiate the class if it hasn't been instantiated
benchmark_instance = benchmark_instance or benchmark()
# Get the method tied to the class
instance_benchmark_fn = getattr(benchmark_instance, attr)
# Call the instance method
instance_benchmark_fn()
def benchmarks_main(true_main, argv=None):
"""Run benchmarks as declared in argv.
Args:
true_main: True main function to run if benchmarks are not requested.
argv: the command line arguments (if None, uses sys.argv).
"""
if argv is None:
argv = sys.argv
found_arg = [arg for arg in argv
if arg.startswith("--benchmarks=")
or arg.startswith("-benchmarks=")]
if found_arg:
# Remove --benchmarks arg from sys.argv
argv.remove(found_arg[0])
    regex = found_arg[0].split("=", 1)[1]
app.run(lambda _: _run_benchmarks(regex), argv=argv)
else:
true_main()
|
tensorflow-master
|
tensorflow/python/platform/benchmark.py
|
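A minimal sketch of the benchmark flow above: subclassing the registered Benchmark class picks the class up in the global registry, methods whose names start with "benchmark" are run by benchmarks_main, and report_benchmark either prints the proto or writes it under TEST_REPORT_FILE_PREFIX. The class name, timing loop, and metric below are hypothetical.

import time
import tensorflow as tf

class MatmulBenchmark(tf.test.Benchmark):

  def benchmarkMatmul(self):
    iters = 100
    start = time.time()
    for _ in range(iters):
      pass  # run the operation under test here
    wall_time = (time.time() - start) / iters
    # The entry name defaults to "MatmulBenchmark.benchmarkMatmul".
    self.report_benchmark(
        iters=iters,
        wall_time=wall_time,
        metrics=[{'name': 'examples_per_sec', 'value': 1.0 / wall_time}])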
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sanity tests for tf.flags."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import unittest
from absl import flags as absl_flags
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
flags.DEFINE_string(
flag_name='old_string', default_value='default', docstring='docstring')
flags.DEFINE_string(
name='new_string', default='default', help='docstring')
flags.DEFINE_integer(
flag_name='old_integer', default_value=1, docstring='docstring')
flags.DEFINE_integer(
name='new_integer', default=1, help='docstring')
flags.DEFINE_float(
flag_name='old_float', default_value=1.5, docstring='docstring')
flags.DEFINE_float(
name='new_float', default=1.5, help='docstring')
flags.DEFINE_bool(
flag_name='old_bool', default_value=True, docstring='docstring')
flags.DEFINE_bool(
name='new_bool', default=True, help='docstring')
flags.DEFINE_boolean(
flag_name='old_boolean', default_value=False, docstring='docstring')
flags.DEFINE_boolean(
name='new_boolean', default=False, help='docstring')
class FlagsTest(unittest.TestCase):
def setUp(self):
self.original_flags = flags.FlagValues()
self.wrapped_flags = flags._FlagValuesWrapper(self.original_flags)
flags.DEFINE_string(
'test', 'default', 'test flag', flag_values=self.wrapped_flags)
def test_attribute_overrides(self):
# Test that methods defined in absl.flags.FlagValues are the same as the
# wrapped ones.
self.assertEqual(flags.FLAGS.is_parsed, absl_flags.FLAGS.is_parsed)
def test_getattr(self):
self.assertFalse(self.wrapped_flags.is_parsed())
with test.mock.patch.object(sys, 'argv', new=['program', '--test=new']):
self.assertEqual('new', self.wrapped_flags.test)
self.assertTrue(self.wrapped_flags.is_parsed())
def test_setattr(self):
self.assertEqual('default', self.wrapped_flags.test)
self.wrapped_flags.test = 'new'
self.assertEqual('new', self.wrapped_flags.test)
def test_delattr(self):
del self.wrapped_flags.test
self.assertNotIn('test', self.wrapped_flags)
with self.assertRaises(AttributeError):
_ = self.wrapped_flags.test
def test_dir(self):
self.assertEqual(['test'], dir(self.wrapped_flags))
def test_getitem(self):
self.assertIs(self.original_flags['test'], self.wrapped_flags['test'])
def test_setitem(self):
flag = flags.Flag(flags.ArgumentParser(), flags.ArgumentSerializer(),
'fruit', 'apple', 'the fruit type')
self.wrapped_flags['fruit'] = flag
self.assertIs(self.original_flags['fruit'], self.wrapped_flags['fruit'])
self.assertEqual('apple', self.wrapped_flags.fruit)
def test_len(self):
self.assertEqual(1, len(self.wrapped_flags))
def test_iter(self):
self.assertEqual(['test'], list(self.wrapped_flags))
def test_str(self):
self.assertEqual(str(self.wrapped_flags), str(self.original_flags))
def test_call(self):
self.wrapped_flags(['program', '--test=new'])
self.assertEqual('new', self.wrapped_flags.test)
def test_keyword_arguments(self):
test_cases = (
('old_string', 'default'),
('new_string', 'default'),
('old_integer', 1),
('new_integer', 1),
('old_float', 1.5),
('new_float', 1.5),
('old_bool', True),
('new_bool', True),
('old_boolean', False),
('new_boolean', False),
)
for flag_name, default_value in test_cases:
self.assertEqual(default_value, absl_flags.FLAGS[flag_name].default)
self.assertEqual('docstring', absl_flags.FLAGS[flag_name].help)
if __name__ == '__main__':
unittest.main()
|
tensorflow-master
|
tensorflow/python/platform/flags_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for our flags implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
flags.DEFINE_boolean('myflag', False, '')
def main(argv):
  if len(argv) != 3:
print("Length of argv was not 3: ", argv)
sys.exit(-1)
if argv[1] != "--passthrough":
print("--passthrough argument not in argv")
sys.exit(-1)
if argv[2] != "extra":
print("'extra' argument not in argv")
sys.exit(-1)
if __name__ == '__main__':
sys.argv.extend(["--myflag", "--passthrough", "extra"])
app.run()
|
tensorflow-master
|
tensorflow/python/platform/app_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import resource_loader
class DefaultResourceLoaderTest(googletest.TestCase):
def test_exception(self):
with self.assertRaises(IOError):
resource_loader.load_resource("/fake/file/path/dne")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/platform/resource_loader_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
def test_log(self):
# Just check that logging works without raising an exception.
logging.error("test log message")
if __name__ == "__main__":
googletest.main()
|
tensorflow-master
|
tensorflow/python/platform/logging_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switch between Google or open source dependencies."""
# Switch between Google and OSS dependencies
USE_OSS = True
# Per-dependency switches determining whether each dependency is ready
# to be replaced by its OSS equivalent.
# TODO(danmane,mrry,opensource): Flip these switches, then remove them
OSS_APP = True
OSS_FLAGS = True
OSS_GFILE = True
OSS_GOOGLETEST = True
OSS_LOGGING = True
OSS_PARAMETERIZED = True
|
tensorflow-master
|
tensorflow/python/platform/control_imports.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Import router for absl.flags. See https://github.com/abseil/abseil-py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging as _logging
import sys as _sys
# go/tf-wildcard-import
from absl.flags import * # pylint: disable=wildcard-import
import six as _six
from tensorflow.python.util import tf_decorator
# Since we wrap absl.flags DEFINE functions, we need to declare this module
# does not affect key flags.
disclaim_key_flags() # pylint: disable=undefined-variable
_RENAMED_ARGUMENTS = {
'flag_name': 'name',
'default_value': 'default',
'docstring': 'help',
}
def _wrap_define_function(original_function):
"""Wraps absl.flags's define functions so tf.flags accepts old names."""
def wrapper(*args, **kwargs):
"""Wrapper function that turns old keyword names to new ones."""
has_old_names = False
for old_name, new_name in _six.iteritems(_RENAMED_ARGUMENTS):
if old_name in kwargs:
has_old_names = True
value = kwargs.pop(old_name)
kwargs[new_name] = value
if has_old_names:
_logging.warning(
'Use of the keyword argument names (flag_name, default_value, '
'docstring) is deprecated, please use (name, default, help) instead.')
return original_function(*args, **kwargs)
return tf_decorator.make_decorator(original_function, wrapper)
class _FlagValuesWrapper(object):
"""Wrapper class for absl.flags.FLAGS.
  The difference is that tf.flags.FLAGS implicitly parses sys.argv when a
  flag value is accessed before the flags have been explicitly parsed,
  while absl.flags.FLAGS raises an exception.
"""
def __init__(self, flags_object):
self.__dict__['__wrapped'] = flags_object
def __getattribute__(self, name):
if name == '__dict__':
return super(_FlagValuesWrapper, self).__getattribute__(name)
return self.__dict__['__wrapped'].__getattribute__(name)
def __getattr__(self, name):
wrapped = self.__dict__['__wrapped']
# To maintain backwards compatibility, implicitly parse flags when reading
# a flag.
if not wrapped.is_parsed():
wrapped(_sys.argv)
return wrapped.__getattr__(name)
def __setattr__(self, name, value):
return self.__dict__['__wrapped'].__setattr__(name, value)
def __delattr__(self, name):
return self.__dict__['__wrapped'].__delattr__(name)
def __dir__(self):
return self.__dict__['__wrapped'].__dir__()
def __getitem__(self, name):
return self.__dict__['__wrapped'].__getitem__(name)
def __setitem__(self, name, flag):
return self.__dict__['__wrapped'].__setitem__(name, flag)
def __len__(self):
return self.__dict__['__wrapped'].__len__()
def __iter__(self):
return self.__dict__['__wrapped'].__iter__()
def __str__(self):
return self.__dict__['__wrapped'].__str__()
def __call__(self, *args, **kwargs):
return self.__dict__['__wrapped'].__call__(*args, **kwargs)
# pylint: disable=invalid-name,used-before-assignment
# absl.flags APIs use `default` as the name of the default value argument.
# Allow the following functions to continue accepting `default_value`.
DEFINE_string = _wrap_define_function(DEFINE_string)
DEFINE_boolean = _wrap_define_function(DEFINE_boolean)
DEFINE_bool = DEFINE_boolean
DEFINE_float = _wrap_define_function(DEFINE_float)
DEFINE_integer = _wrap_define_function(DEFINE_integer)
# pylint: enable=invalid-name,used-before-assignment
FLAGS = _FlagValuesWrapper(FLAGS) # pylint: disable=used-before-assignment
|
tensorflow-master
|
tensorflow/python/platform/flags.py
|
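A short sketch of the compatibility shim above, assuming the module is exposed as `tf.compat.v1.flags`: the renamed keyword arguments are accepted with a deprecation warning, and FLAGS parses sys.argv implicitly on first attribute access.

import tensorflow.compat.v1 as tf

# New-style keyword names, passed straight through to absl.flags.
tf.flags.DEFINE_string(name='data_dir', default='/tmp', help='Input directory.')
# Old-style names, rewritten by _wrap_define_function with a warning.
tf.flags.DEFINE_integer(flag_name='batch_size', default_value=32,
                        docstring='Batch size.')

FLAGS = tf.flags.FLAGS
print(FLAGS.data_dir, FLAGS.batch_size)  # triggers the implicit parse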
tensorflow-master
|
tensorflow/python/platform/__init__.py
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Platform-specific code for checking the integrity of the TensorFlow build."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
try:
from tensorflow.python.platform import build_info
except ImportError:
raise ImportError("Could not import tensorflow. Do not import tensorflow "
"from its source directory; change directory to outside "
"the TensorFlow source tree, and relaunch your Python "
"interpreter from there.")
def preload_check():
"""Raises an exception if the environment is not correctly configured.
Raises:
ImportError: If the check detects that the environment is not correctly
configured, and attempting to load the TensorFlow runtime will fail.
"""
if os.name == "nt":
# Attempt to load any DLLs that the Python extension depends on before
# we load the Python extension, so that we can raise an actionable error
# message if they are not found.
import ctypes # pylint: disable=g-import-not-at-top
if hasattr(build_info, "msvcp_dll_name"):
try:
ctypes.WinDLL(build_info.msvcp_dll_name)
except OSError:
raise ImportError(
"Could not find %r. TensorFlow requires that this DLL be "
"installed in a directory that is named in your %%PATH%% "
"environment variable. You may install this DLL by downloading "
"Visual C++ 2015 Redistributable Update 3 from this URL: "
"https://www.microsoft.com/en-us/download/details.aspx?id=53587"
% build_info.msvcp_dll_name)
else:
# TODO(mrry): Consider adding checks for the Linux and Mac OS X builds.
pass
|
tensorflow-master
|
tensorflow/python/platform/self_check.py
|
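For context, a hedged sketch of how the check above is used: TensorFlow's import path is expected to call preload_check() before loading the native extension, so a missing MSVC runtime surfaces as an actionable ImportError rather than an opaque DLL load failure.

from tensorflow.python.platform import self_check

# No-op on non-Windows platforms; on Windows it preloads the MSVC runtime
# DLL named in build_info and raises ImportError with download instructions
# if it cannot be found.
self_check.preload_check()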
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing.
See the [Testing](https://tensorflow.org/api_guides/python/test) guide.
Note: `tf.compat.v1.test.mock` is an alias to the python `mock` or
`unittest.mock` depending on the python version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-import-order
from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest
# pylint: disable=unused-import
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
# pylint: enable=unused-import,g-bad-import-order
import sys
from tensorflow.python.util.tf_export import tf_export
if sys.version_info.major == 2:
import mock # pylint: disable=g-import-not-at-top,unused-import
else:
from unittest import mock # pylint: disable=g-import-not-at-top,g-importing-member
tf_export(v1=['test.mock'])(mock)
# Import Benchmark class
Benchmark = _googletest.Benchmark # pylint: disable=invalid-name
# Import StubOutForTesting class
StubOutForTesting = _googletest.StubOutForTesting # pylint: disable=invalid-name
@tf_export('test.main')
def main(argv=None):
"""Runs all unit tests."""
_test_util.InstallStackTraceHandler()
return _googletest.main(argv)
@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
"""Returns a temporary directory for use during tests.
There is no need to delete the directory after the test.
Returns:
The temporary directory.
"""
return _googletest.GetTempDir()
@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "core/platform".
Returns:
An absolute path to the linked in runfiles.
"""
return _googletest.test_src_dir_path(relative_path)
@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
"""Returns whether TensorFlow was built with CUDA (GPU) support."""
return _test_util.IsGoogleCudaEnabled()
|
tensorflow-master
|
tensorflow/python/platform/test.py
|
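A minimal sketch of the testing API above; the test class and values are illustrative. `self.evaluate` (from TensorFlowTestCase) works in both graph and eager modes.

import tensorflow as tf

class SquareTest(tf.test.TestCase):

  def test_square(self):
    x = tf.square([2, 3])
    self.assertAllEqual(self.evaluate(x), [4, 9])

if __name__ == '__main__':
  tf.test.main()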
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resource management library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os as _os
import sys as _sys
from tensorflow.python.util import tf_inspect as _inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['resource_loader.load_resource'])
def load_resource(path):
"""Load the resource at given path, where path is relative to tensorflow/.
Args:
path: a string resource path relative to tensorflow/.
Returns:
The contents of that resource.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
tensorflow_root = (_os.path.join(
_os.path.dirname(__file__), _os.pardir, _os.pardir))
path = _os.path.join(tensorflow_root, path)
path = _os.path.abspath(path)
with open(path, 'rb') as f:
return f.read()
# pylint: disable=protected-access
@tf_export(v1=['resource_loader.get_data_files_path'])
def get_data_files_path():
"""Get a direct path to the data files colocated with the script.
Returns:
The directory where files specified in data attribute of py_test
and py_binary are stored.
"""
return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
@tf_export(v1=['resource_loader.get_root_dir_with_all_resources'])
def get_root_dir_with_all_resources():
"""Get a root directory containing all the data attributes in the build rule.
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary. Falls back to returning the same as get_data_files_path if it
fails to detect a bazel runfiles directory.
"""
script_dir = get_data_files_path()
# Create a history of the paths, because the data files are located relative
# to the repository root directory, which is directly under runfiles
# directory.
directories = [script_dir]
data_files_dir = ''
while True:
candidate_dir = directories[-1]
current_directory = _os.path.basename(candidate_dir)
if '.runfiles' in current_directory:
      # Our file should never be directly under runfiles.
      # If the history has only one item, we are directly inside the runfiles
      # directory; something is wrong, so fall back to the default return
      # value, the script directory.
if len(directories) > 1:
data_files_dir = directories[-2]
break
else:
new_candidate_dir = _os.path.dirname(candidate_dir)
# If we are at the root directory these two will be the same.
if new_candidate_dir == candidate_dir:
break
else:
directories.append(new_candidate_dir)
return data_files_dir or script_dir
@tf_export(v1=['resource_loader.get_path_to_datafile'])
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to tensorflow/
Args:
path: a string resource path relative to tensorflow/
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
data_files_path = _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
return _os.path.join(data_files_path, path)
@tf_export(v1=['resource_loader.readahead_file_path'])
def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument
"""Readahead files not implemented; simply returns given path."""
return path
|
tensorflow-master
|
tensorflow/python/platform/resource_loader.py
|
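A usage sketch for the loaders above; the data file paths are hypothetical and must appear in the target's data dependencies for the lookups to succeed.

from tensorflow.python.platform import resource_loader

# Path relative to the calling script's directory (hypothetical file).
vocab_path = resource_loader.get_path_to_datafile('testdata/vocab.txt')

# Path relative to the tensorflow/ root; returns the raw bytes.
contents = resource_loader.load_resource('python/platform/resource_loader.py')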
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic entry point script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from absl.app import run as _run
from tensorflow.python.platform import flags
from tensorflow.python.util.tf_export import tf_export
def _parse_flags_tolerate_undef(argv):
"""Parse args, returning any unknown flags (ABSL defaults to crashing)."""
return flags.FLAGS(_sys.argv if argv is None else argv, known_only=True)
@tf_export(v1=['app.run'])
def run(main=None, argv=None):
"""Runs the program with an optional 'main' function and 'argv' list."""
main = main or _sys.modules['__main__'].main
_run(main=main, argv=argv, flags_parser=_parse_flags_tolerate_undef)
|
tensorflow-master
|
tensorflow/python/platform/app.py
|
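A minimal entry-point sketch using the wrapper above: unknown flags are tolerated rather than crashing, and the unparsed remainder of argv is passed through to main. The flag and main body are illustrative.

import tensorflow.compat.v1 as tf

tf.flags.DEFINE_string('mode', 'train', 'What the binary should do.')
FLAGS = tf.flags.FLAGS

def main(argv):
  # argv holds the program name plus any arguments that were not parsed
  # as flags.
  print('mode:', FLAGS.mode, 'passthrough:', argv[1:])

if __name__ == '__main__':
  tf.app.run(main)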
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Imports absltest as a replacement for testing.pybase.googletest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import os
import sys
import tempfile
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from absl.testing.absltest import *
# pylint: enable=wildcard-import
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import app
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
Benchmark = benchmark.TensorFlowBenchmark # pylint: disable=invalid-name
absltest_main = main
# We keep a global variable in this module to make sure we create the temporary
# directory only once per test binary invocation.
_googletest_temp_dir = ''
# pylint: disable=invalid-name
# pylint: disable=undefined-variable
def g_main(argv):
"""Delegate to absltest.main."""
absltest_main(argv=argv)
# Redefine main to allow running benchmarks
def main(argv=None): # pylint: disable=function-redefined
def main_wrapper():
args = argv
if args is None:
args = sys.argv
return app.run(main=g_main, argv=args)
benchmark.benchmarks_main(true_main=main_wrapper)
def GetTempDir():
"""Return a temporary directory for tests to use."""
global _googletest_temp_dir
if not _googletest_temp_dir:
if os.environ.get('TEST_TMPDIR'):
temp_dir = tempfile.mkdtemp(prefix=os.environ['TEST_TMPDIR'])
else:
first_frame = tf_inspect.stack()[-1][0]
temp_dir = os.path.join(tempfile.gettempdir(),
os.path.basename(tf_inspect.getfile(first_frame)))
      # Note: str.rstrip strips a character set, not a suffix, so remove a
      # trailing '.py' explicitly before using the name as a prefix.
      if temp_dir.endswith('.py'):
        temp_dir = temp_dir[:-len('.py')]
      temp_dir = tempfile.mkdtemp(prefix=temp_dir)
# Make sure we have the correct path separators.
temp_dir = temp_dir.replace('/', os.sep)
def delete_temp_dir(dirname=temp_dir):
try:
file_io.delete_recursively(dirname)
except errors.OpError as e:
logging.error('Error removing %s: %s', dirname, e)
atexit.register(delete_temp_dir)
_googletest_temp_dir = temp_dir
return _googletest_temp_dir
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "contrib/session_bundle/example".
Returns:
An absolute path to the linked in runfiles.
"""
return os.path.join(os.environ['TEST_SRCDIR'],
'org_tensorflow/tensorflow', relative_path)
def StatefulSessionAvailable():
return False
@tf_export(v1=['test.StubOutForTesting'])
class StubOutForTesting(object):
"""Support class for stubbing methods out for unit testing.
Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.CleanUp()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the CleanUp() looks up the old
value of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
"""Do not rely on the destructor to undo your stubs.
You cannot guarantee exactly when the destructor will get called without
relying on implementation details of a Python VM that may change.
"""
self.CleanUp()
# __enter__ and __exit__ allow use as a context manager.
def __enter__(self):
return self
def __exit__(self, unused_exc_type, unused_exc_value, unused_tb):
self.CleanUp()
def CleanUp(self):
"""Undoes all SmartSet() & Set() calls, restoring original definitions."""
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr.
This method is smart and works at the module, class, and instance level
while preserving proper inheritance. It will not stub out C types however
unless that has been explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Args:
obj: The object whose attributes we want to modify.
attr_name: The name of the attribute to modify.
new_attr: The new value for the attribute.
Raises:
AttributeError: If the attribute cannot be found.
"""
_, obj = tf_decorator.unwrap(obj)
if (tf_inspect.ismodule(obj) or
(not tf_inspect.isclass(obj) and attr_name in obj.__dict__)):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not tf_inspect.isclass(obj):
mro = list(tf_inspect.getmro(obj.__class__))
else:
mro = list(tf_inspect.getmro(obj))
mro.reverse()
orig_attr = None
found_attr = False
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
found_attr = True
except AttributeError:
continue
if not found_attr:
raise AttributeError('Attribute not found.')
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses SmartSet() calls, restoring things to original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call SmartUnsetAll() repeatedly, as later calls have
no effect if no SmartSet() calls have been made.
"""
for args in reversed(self.stubs):
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""In parent, replace child_name's old definition with new_child.
The parent could be a module when the child is a function at
module scope. Or the parent could be a class when a class' method
is being replaced. The named child is set to new_child, while the
prior definition is saved away for later, when UnsetAll() is
called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
Args:
parent: The context in which the attribute child_name is to be changed.
child_name: The name of the attribute to change.
new_child: The new value of the attribute.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses Set() calls, restoring things to their original definitions.
This method is automatically called when the StubOutForTesting()
object is deleted; there is no need to call it explicitly.
It is okay to call UnsetAll() repeatedly, as later calls have no
effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
for (parent, old_child, child_name) in reversed(self.cache):
setattr(parent, child_name, old_child)
self.cache = []
|
tensorflow-master
|
tensorflow/python/platform/googletest.py
|
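A usage sketch for StubOutForTesting above, using the context-manager protocol so CleanUp() runs automatically; the stubbed function is restored on exit.

import os.path
from tensorflow.python.platform import googletest

with googletest.StubOutForTesting() as stubs:
  stubs.Set(os.path, 'exists', lambda unused_path: True)
  assert os.path.exists('/surely/not/a/real/path')
# The original os.path.exists is restored here.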
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
import platform as _platform
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow_core module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow_core module missing symbols that come after sysconfig.
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
"""Get the compilation flags for custom operators.
Returns:
The compilation flags.
"""
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
return flags
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
"""Get the link flags for custom operators.
Returns:
The link flags.
"""
is_mac = _platform.system() == 'Darwin'
ver = _VERSION.split('.')[0]
flags = []
if not _MONOLITHIC_BUILD:
flags.append('-L%s' % get_lib())
    if is_mac:
      # The macOS linker does not support the '-l:' syntax; link by name.
      flags.append('-ltensorflow_framework.%s' % ver)
    else:
      flags.append('-l:libtensorflow_framework.so.%s' % ver)
return flags
|
tensorflow-master
|
tensorflow/python/platform/sysconfig.py
|
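A sketch of assembling a custom-op build command from the flags above; the source and output file names are hypothetical, and the exact compiler invocation varies by platform and toolchain.

import subprocess
import tensorflow as tf

cflags = tf.sysconfig.get_compile_flags()  # -I<include dir> and ABI define
lflags = tf.sysconfig.get_link_flags()     # -L<lib dir> and framework library

subprocess.check_call(
    ['g++', '-std=c++11', '-shared', '-fPIC', 'zero_out.cc',
     '-o', 'zero_out.so'] + cflags + lflags)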
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extension to unittest to run parameterized tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
raise ImportError("Not implemented yet.")
|
tensorflow-master
|
tensorflow/python/platform/parameterized.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = '/tmp/'
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
including_default_value_fields=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/platform/benchmark_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FeatureColumns: tools for ingesting and representing features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,wildcard-import
from tensorflow.python.feature_column.feature_column import *
from tensorflow.python.feature_column.feature_column_v2 import *
from tensorflow.python.feature_column.sequence_feature_column import *
from tensorflow.python.feature_column.serialization import *
# pylint: enable=unused-import,line-too-long
|
tensorflow-master
|
tensorflow/python/feature_column/feature_column_lib.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for sequence feature columns with SequenceExamples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import tempfile
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.keras.layers import recurrent
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class SequenceFeatureColumnIntegrationTest(test.TestCase):
def _make_sequence_example(self):
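    # Builds a SequenceExample with two context features ('int_ctx',
    # 'float_ctx') and two variable-length feature lists ('int_list',
    # 'str_list').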
example = example_pb2.SequenceExample()
example.context.feature['int_ctx'].int64_list.value.extend([5])
example.context.feature['float_ctx'].float_list.value.extend([123.6])
for val in range(0, 10, 2):
feat = feature_pb2.Feature()
feat.int64_list.value.extend([val] * val)
example.feature_lists.feature_list['int_list'].feature.extend([feat])
for val in range(1, 11, 2):
feat = feature_pb2.Feature()
feat.bytes_list.value.extend([compat.as_bytes(str(val))] * val)
example.feature_lists.feature_list['str_list'].feature.extend([feat])
return example
def _build_feature_columns(self):
col = fc.categorical_column_with_identity('int_ctx', num_buckets=100)
ctx_cols = [
fc.embedding_column(col, dimension=10),
fc.numeric_column('float_ctx')
]
identity_col = sfc.sequence_categorical_column_with_identity(
'int_list', num_buckets=10)
bucket_col = sfc.sequence_categorical_column_with_hash_bucket(
'bytes_list', hash_bucket_size=100)
seq_cols = [
fc.embedding_column(identity_col, dimension=10),
fc.embedding_column(bucket_col, dimension=20)
]
return ctx_cols, seq_cols
def test_sequence_example_into_input_layer(self):
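    # Note: this uses the module-level _make_sequence_example() defined at the
    # bottom of this file, which parses _SEQ_EX_PROTO.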
examples = [_make_sequence_example().SerializeToString()] * 100
ctx_cols, seq_cols = self._build_feature_columns()
def _parse_example(example):
ctx, seq = parsing_ops.parse_single_sequence_example(
example,
context_features=fc.make_parse_example_spec_v2(ctx_cols),
sequence_features=fc.make_parse_example_spec_v2(seq_cols))
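      # Merge the sequence features into the context dict so a single features
      # mapping can feed both the sequence and context input layers below.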
ctx.update(seq)
return ctx
ds = dataset_ops.Dataset.from_tensor_slices(examples)
ds = ds.map(_parse_example)
ds = ds.batch(20)
# Test on a single batch
features = ds.make_one_shot_iterator().get_next()
# Tile the context features across the sequence features
sequence_input_layer = sfc.SequenceFeatures(seq_cols)
seq_layer, _ = sequence_input_layer(features)
input_layer = fc.DenseFeatures(ctx_cols)
ctx_layer = input_layer(features)
input_layer = sfc.concatenate_context_input(ctx_layer, seq_layer)
rnn_layer = recurrent.RNN(recurrent.SimpleRNNCell(10))
output = rnn_layer(input_layer)
with self.cached_session() as sess:
sess.run(variables.global_variables_initializer())
features_r = sess.run(features)
self.assertAllEqual(features_r['int_list'].dense_shape, [20, 3, 6])
output_r = sess.run(output)
self.assertAllEqual(output_r.shape, [20, 10])
class SequenceExampleParsingTest(test.TestCase):
def test_seq_ex_in_sequence_categorical_column_with_identity(self):
self._test_parsed_sequence_example(
'int_list', sfc.sequence_categorical_column_with_identity,
10, [3, 6], [2, 4, 6])
def test_seq_ex_in_sequence_categorical_column_with_hash_bucket(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_hash_bucket,
10, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_list(self):
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_list,
list(string.ascii_lowercase), [3, 4],
[compat.as_bytes(x) for x in 'acg'])
def test_seq_ex_in_sequence_categorical_column_with_vocabulary_file(self):
_, fname = tempfile.mkstemp()
with open(fname, 'w') as f:
f.write(string.ascii_lowercase)
self._test_parsed_sequence_example(
'bytes_list', sfc.sequence_categorical_column_with_vocabulary_file,
fname, [3, 4], [compat.as_bytes(x) for x in 'acg'])
def _test_parsed_sequence_example(
self, col_name, col_fn, col_arg, shape, values):
"""Helper function to check that each FeatureColumn parses correctly.
Args:
col_name: string, name to give to the feature column. Should match
the name that the column will parse out of the features dict.
col_fn: function used to create the feature column. For example,
sequence_numeric_column.
col_arg: second arg that the target feature column is expecting.
shape: the expected dense_shape of the feature after parsing into
a SparseTensor.
      values: the expected values at indices [0, 2, 6] of the feature
after parsing into a SparseTensor.
"""
example = _make_sequence_example()
columns = [
fc.categorical_column_with_identity('int_ctx', num_buckets=100),
fc.numeric_column('float_ctx'),
col_fn(col_name, col_arg)
]
context, seq_features = parsing_ops.parse_single_sequence_example(
example.SerializeToString(),
context_features=fc.make_parse_example_spec_v2(columns[:2]),
sequence_features=fc.make_parse_example_spec_v2(columns[2:]))
with self.cached_session() as sess:
ctx_result, seq_result = sess.run([context, seq_features])
self.assertEqual(list(seq_result[col_name].dense_shape), shape)
self.assertEqual(
list(seq_result[col_name].values[[0, 2, 6]]), values)
self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
self.assertEqual(ctx_result['int_ctx'].values[0], 5)
self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
_SEQ_EX_PROTO = """
context {
feature {
key: "float_ctx"
value {
float_list {
value: 123.6
}
}
}
feature {
key: "int_ctx"
value {
int64_list {
value: 5
}
}
}
}
feature_lists {
feature_list {
key: "bytes_list"
value {
feature {
bytes_list {
value: "a"
}
}
feature {
bytes_list {
value: "b"
value: "c"
}
}
feature {
bytes_list {
value: "d"
value: "e"
value: "f"
value: "g"
}
}
}
}
feature_list {
key: "float_list"
value {
feature {
float_list {
value: 1.0
}
}
feature {
float_list {
value: 3.0
value: 3.0
value: 3.0
}
}
feature {
float_list {
value: 5.0
value: 5.0
value: 5.0
value: 5.0
value: 5.0
}
}
}
}
feature_list {
key: "int_list"
value {
feature {
int64_list {
value: 2
value: 2
}
}
feature {
int64_list {
value: 4
value: 4
value: 4
value: 4
}
}
feature {
int64_list {
value: 6
value: 6
value: 6
value: 6
value: 6
value: 6
}
}
}
}
}
"""
def _make_sequence_example():
example = example_pb2.SequenceExample()
return text_format.Parse(_SEQ_EX_PROTO, example)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/feature_column/sequence_feature_column_integration_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
To feed sparse features into DNN models, wrap the column with
`embedding_column` or `indicator_column`. `indicator_column` is recommended
   for features with only a few possible values. For features with many
   possible values, `embedding_column` is recommended to reduce the size of
   your model.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
   Sparse features can be crossed (also known as conjoined or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building a model using FeatureColumns; this can be used in a
`model_fn` given to the `tf.estimator.Estimator`:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
NOTE: The new feature columns are being developed in feature_column_v2.py and
largely duplicate the code here. Please make sure to update logic in both
places.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine import training
from tensorflow.python.layers import base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def _internal_input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
scope=None,
cols_to_output_tensors=None,
from_template=False):
"""See input_layer. `scope` is a name or variable scope to use."""
feature_columns = _normalize_feature_columns(feature_columns)
for column in feature_columns:
if not isinstance(column, _DenseColumn):
raise ValueError(
'Items of feature_columns must be a _DenseColumn. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(column))
weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
def _get_logits(): # pylint: disable=missing-docstring
builder = _LazyBuilder(features)
output_tensors = []
ordered_columns = []
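    # Process columns in sorted-by-name order so that variable scope names
    # (and hence checkpoint variable names) are deterministic.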
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name): # pylint: disable=protected-access
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
output_tensor = array_ops.reshape(
tensor, shape=(batch_size, num_elements))
output_tensors.append(output_tensor)
if cols_to_vars is not None:
# Retrieve any variables created (some _DenseColumn's don't create
# variables, in which case an empty list is returned).
cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES,
scope=variable_scope.get_variable_scope().name)
if cols_to_output_tensors is not None:
cols_to_output_tensors[column] = output_tensor
_verify_static_batch_size_equality(output_tensors, ordered_columns)
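    # Concatenate the per-column (batch_size, num_elements) tensors along the
    # feature axis to form the final (batch_size, total_elements) input layer.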
return array_ops.concat(output_tensors, 1)
  # If we're constructing from `make_template`, that by default adds a
  # variable scope with the name of the layer. In that case, we don't want to
  # add another `variable_scope`, as that would break checkpoints.
if from_template:
return _get_logits()
else:
with variable_scope.variable_scope(
scope, default_name='input_layer', values=features.values()):
return _get_logits()
@tf_export(v1=['feature_column.input_layer'])
def input_layer(features,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
cols_to_output_tensors=None):
"""Returns a dense `Tensor` as input layer based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
  At the first layer of the model, this column-oriented data should be
  converted to a single `Tensor`.
Example:
```python
price = numeric_column('price')
  keywords_embedded = embedding_column(
      categorical_column_with_hash_bucket("keywords", 10000), dimension=16)
columns = [price, keywords_embedded, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
for units in [128, 64, 32]:
dense_tensor = tf.compat.v1.layers.dense(dense_tensor, units, tf.nn.relu)
prediction = tf.compat.v1.layers.dense(dense_tensor, 1)
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depending on the
      corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical features,
you can wrap them with an `embedding_column` or `indicator_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to list of `Variable`s. For example, after
the call, we might have cols_to_vars =
{_EmbeddingColumn(
categorical_column=_HashedCategoricalColumn(
key='sparse_feature', hash_bucket_size=5, dtype=tf.string),
        dimension=10): [<tf.Variable 'some_variable:0' shape=(5, 10)>,
                        <tf.Variable 'some_variable:1' shape=(5, 10)>]}
If a column creates no variables, its value will be an empty list.
cols_to_output_tensors: If not `None`, must be a dictionary that will be
filled with a mapping from '_FeatureColumn' to the associated
output `Tensor`s.
Returns:
A `Tensor` which represents input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: if an item in `feature_columns` is not a `_DenseColumn`.
"""
return _internal_input_layer(
features,
feature_columns,
weight_collections=weight_collections,
trainable=trainable,
cols_to_vars=cols_to_vars,
cols_to_output_tensors=cols_to_output_tensors)
# TODO(akshayka): InputLayer should be a subclass of Layer, and it
# should implement the logic in input_layer using Layer's build-and-call
# paradigm; input_layer should create an instance of InputLayer and
# return the result of invoking its apply method, just as functional layers do.
class InputLayer(object):
"""An object-oriented version of `input_layer` that reuses variables."""
def __init__(self,
feature_columns,
weight_collections=None,
trainable=True,
cols_to_vars=None,
name='feature_column_input_layer',
create_scope_now=True):
"""See `input_layer`."""
self._feature_columns = feature_columns
self._weight_collections = weight_collections
self._trainable = trainable
self._cols_to_vars = cols_to_vars
self._name = name
self._input_layer_template = template.make_template(
self._name, _internal_input_layer, create_scope_now_=create_scope_now)
self._scope = self._input_layer_template.variable_scope
def __call__(self, features):
return self._input_layer_template(
features=features,
feature_columns=self._feature_columns,
weight_collections=self._weight_collections,
trainable=self._trainable,
cols_to_vars=None,
from_template=True)
@property
def name(self):
return self._name
@property
def non_trainable_variables(self):
return self._input_layer_template.non_trainable_variables
@property
def non_trainable_weights(self):
return self._input_layer_template.non_trainable_weights
@property
def trainable_variables(self):
return self._input_layer_template.trainable_variables
@property
def trainable_weights(self):
return self._input_layer_template.trainable_weights
@property
def variables(self):
return self._input_layer_template.variables
@property
def weights(self):
return self._input_layer_template.weights
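# A minimal usage sketch for `InputLayer` (the feature column and feature
# dicts below are hypothetical; it illustrates that repeated calls reuse the
# same variables through the underlying template):
#
#   price = _numeric_column('price')
#   layer = InputLayer(feature_columns=[price])
#   train_out = layer(train_features)  # First call creates the variables.
#   eval_out = layer(eval_features)    # Second call reuses those variables.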
@tf_export(v1=['feature_column.linear_model'])
def linear_model(features,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
cols_to_vars=None):
"""Returns a linear prediction `Tensor` based on given `feature_columns`.
This function generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
Note on supported columns: `linear_model` treats categorical columns as
  `indicator_column`s. To be specific, assume the input `SparseTensor` looks
  like:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
  `linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
just like `indicator_column`, while `input_layer` explicitly requires wrapping
each of categorical columns with an `embedding_column` or an
`indicator_column`.
Example of usage:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
  keywords = categorical_column_with_hash_bucket("keywords", 10000)
  keywords_price = crossed_column(['keywords', price_buckets], ...)
  columns = [price_buckets, keywords, keywords_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features, columns)
```
  The `sparse_combiner` argument works as follows.
  For example, given two features represented as categorical columns:
```python
# Feature 1
shape = [2, 2]
{
[0, 0]: "a"
[0, 1]: "b"
[1, 0]: "c"
}
# Feature 2
shape = [2, 3]
{
[0, 0]: "d"
[1, 0]: "e"
[1, 1]: "f"
[1, 2]: "f"
}
```
  with `sparse_combiner` set to "mean", the linear model outputs are:
```
y_0 = 1.0 / 2.0 * ( w_a + w_b ) + w_d + b
y_1 = w_c + 1.0 / 3.0 * ( w_e + 2.0 * w_f ) + b
```
  where `y_i` is the output, `b` is the bias, and `w_x` is the weight assigned
  to the presence of `x` in the input features. For example, row 1 of feature 2
  contains three entries, two of which are "f", which yields the
  `1.0 / 3.0 * ( w_e + 2.0 * w_f )` term in `y_1`.
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values are `Tensor` or `SparseTensor` depending on
corresponding `_FeatureColumn`.
feature_columns: An iterable containing the FeatureColumns to use as inputs
to your model. All items should be instances of classes derived from
`_FeatureColumn`s.
units: An integer, dimensionality of the output space. Default value is 1.
    sparse_combiner: A string specifying how to reduce if a categorical column
      is multivalent. Except for `numeric_column`, almost all columns passed to
      `linear_model` are treated as categorical columns. Each categorical
      column is combined independently. Currently "mean", "sqrtn" and "sum" are
      supported, with "sum" the default for linear models. "sqrtn" often
      achieves good accuracy, in particular with bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
weight_collections: A list of collection names to which the Variable will be
      added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
cols_to_vars: If not `None`, must be a dictionary that will be filled with a
mapping from `_FeatureColumn` to associated list of `Variable`s. For
example, after the call, we might have cols_to_vars = {
_NumericColumn(
          key='numeric_feature1', shape=(1,)):
[<tf.Variable 'linear_model/price2/weights:0' shape=(1, 1)>],
'bias': [<tf.Variable 'linear_model/bias_weights:0' shape=(1,)>],
_NumericColumn(
key='numeric_feature2', shape=(2,)):
[<tf.Variable 'linear_model/price1/weights:0' shape=(2, 1)>]}
If a column creates no variables, its value will be an empty list. Note
that cols_to_vars will also contain a string key 'bias' that maps to a
list of Variables.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its shape
is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
nor `_CategoricalColumn`.
"""
with variable_scope.variable_scope(None, 'linear_model') as vs:
model_name = _strip_leading_slashes(vs.name)
linear_model_layer = _LinearModel(
feature_columns=feature_columns,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable,
name=model_name)
retval = linear_model_layer(features) # pylint: disable=not-callable
if cols_to_vars is not None:
cols_to_vars.update(linear_model_layer.cols_to_vars())
return retval
def _add_to_collections(var, weight_collections):
"""Adds a var to the list of weight_collections provided.
Handles the case for partitioned and non-partitioned variables.
Args:
var: A variable or Partitioned Variable.
weight_collections: List of collections to add variable to.
"""
for weight_collection in weight_collections:
# The layer self.add_variable call already adds it to GLOBAL_VARIABLES.
if weight_collection == ops.GraphKeys.GLOBAL_VARIABLES:
continue
# TODO(rohanj): Explore adding a _get_variable_list method on `Variable`
# so that we don't have to do this check.
if isinstance(var, variables.PartitionedVariable):
for constituent_var in list(var):
ops.add_to_collection(weight_collection, constituent_var)
else:
ops.add_to_collection(weight_collection, var)
class _FCLinearWrapper(base.Layer):
"""Wraps a _FeatureColumn in a layer for use in a linear model.
See `linear_model` above.
"""
def __init__(self,
feature_column,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
name=None,
**kwargs):
super(_FCLinearWrapper, self).__init__(
trainable=trainable, name=name, **kwargs)
self._feature_column = feature_column
self._units = units
self._sparse_combiner = sparse_combiner
self._weight_collections = weight_collections
def build(self, _):
if isinstance(self._feature_column, _CategoricalColumn):
weight = self.add_variable(
name='weights',
shape=(self._feature_column._num_buckets, self._units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
else:
num_elements = self._feature_column._variable_shape.num_elements() # pylint: disable=protected-access
weight = self.add_variable(
name='weights',
shape=[num_elements, self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
_add_to_collections(weight, self._weight_collections)
self._weight_var = weight
self.built = True
def call(self, builder):
weighted_sum = _create_weighted_sum(
column=self._feature_column,
builder=builder,
units=self._units,
sparse_combiner=self._sparse_combiner,
weight_collections=self._weight_collections,
trainable=self.trainable,
weight_var=self._weight_var)
return weighted_sum
class _BiasLayer(base.Layer):
"""A layer for the bias term.
"""
def __init__(self,
units=1,
trainable=True,
weight_collections=None,
name=None,
**kwargs):
super(_BiasLayer, self).__init__(trainable=trainable, name=name, **kwargs)
self._units = units
self._weight_collections = weight_collections
def build(self, _):
self._bias_variable = self.add_variable(
'bias_weights',
shape=[self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
_add_to_collections(self._bias_variable, self._weight_collections)
self.built = True
def call(self, _):
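    # The input is ignored: this layer only exposes the bias variable through
    # the Layer call API so that it is created lazily and tracked.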
return self._bias_variable
def _get_expanded_variable_list(variable):
if (isinstance(variable, variables.Variable) or
resource_variable_ops.is_resource_variable(variable)):
return [variable] # Single variable case.
else: # Must be a PartitionedVariable, so convert into a list.
return list(variable)
def _strip_leading_slashes(name):
return name.rsplit('/', 1)[-1]
class _LinearModel(training.Model):
"""Creates a linear model using feature columns.
See `linear_model` for details.
"""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
weight_collections=None,
trainable=True,
name=None,
**kwargs):
super(_LinearModel, self).__init__(name=name, **kwargs)
self._feature_columns = _normalize_feature_columns(
feature_columns)
self._weight_collections = list(weight_collections or [])
if ops.GraphKeys.GLOBAL_VARIABLES not in self._weight_collections:
self._weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
if ops.GraphKeys.MODEL_VARIABLES not in self._weight_collections:
self._weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
column_layers = {}
for column in sorted(self._feature_columns, key=lambda x: x.name):
with variable_scope.variable_scope(
None, default_name=column._var_scope_name) as vs: # pylint: disable=protected-access
        # Using the fully qualified variable scope name would doubly express
        # the outer scope (the scope with which this method was called) in the
        # name of the variable that would get created.
column_name = _strip_leading_slashes(vs.name)
column_layer = _FCLinearWrapper(column, units, sparse_combiner,
self._weight_collections, trainable,
column_name, **kwargs)
column_layers[column_name] = column_layer
self._column_layers = self._add_layers(column_layers)
self._bias_layer = _BiasLayer(
units=units,
trainable=trainable,
weight_collections=self._weight_collections,
name='bias_layer',
**kwargs)
self._cols_to_vars = {}
def cols_to_vars(self):
"""Returns a dict mapping _FeatureColumns to variables.
See `linear_model` for more information.
    This is not populated until `call` is invoked, i.e. until the layer is
    built.
"""
return self._cols_to_vars
def call(self, features):
with variable_scope.variable_scope(self.name):
for column in self._feature_columns:
if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
raise ValueError(
'Items of feature_columns must be either a '
'_DenseColumn or _CategoricalColumn. Given: {}'.format(column))
weighted_sums = []
ordered_columns = []
builder = _LazyBuilder(features)
for layer in sorted(self._column_layers.values(), key=lambda x: x.name):
column = layer._feature_column # pylint: disable=protected-access
ordered_columns.append(column)
weighted_sum = layer(builder)
weighted_sums.append(weighted_sum)
self._cols_to_vars[column] = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES, scope=layer.scope_name)
_verify_static_batch_size_equality(weighted_sums, ordered_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
predictions = nn_ops.bias_add(
predictions_no_bias,
self._bias_layer( # pylint: disable=not-callable
builder,
scope=variable_scope.get_variable_scope()), # pylint: disable=not-callable
name='weighted_sum')
bias = self._bias_layer.variables[0]
self._cols_to_vars['bias'] = _get_expanded_variable_list(bias)
return predictions
def _add_layers(self, layers):
# "Magic" required for keras.Model classes to track all the variables in
# a list of layers.Layer objects.
# TODO(ashankar): Figure out API so user code doesn't have to do this.
for name, layer in layers.items():
setattr(self, 'layer-%s' % name, layer)
return layers
def _transform_features(features, feature_columns):
"""Returns transformed features based on features columns passed in.
  Note that you will most likely not need to use this function directly. Check
  `input_layer` and `linear_model` first to see whether they satisfy your use
  case.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `_FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
      this dict. Values can be a `SparseTensor` or a `Tensor` depending on the
      corresponding `_FeatureColumn`.
feature_columns: An iterable containing all the `_FeatureColumn`s.
Returns:
A `dict` mapping `_FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _normalize_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
builder = _LazyBuilder(features)
for column in sorted(feature_columns, key=lambda x: x.name):
with ops.name_scope(None, default_name=column.name):
outputs[column] = builder.get(column)
return outputs
@tf_export(v1=['feature_column.make_parse_example_spec'])
def make_parse_example_spec(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as arg 'features' in
`tf.io.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_a = categorical_column_with_vocabulary_file(...)
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.io.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, make_parse_example_spec would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `_FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `_FeatureColumn`
instance.
"""
result = {}
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
raise ValueError(
'All feature_columns must be _FeatureColumn instances. '
'Given: {}'.format(column))
config = column._parse_example_spec # pylint: disable=protected-access
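    # Merge this column's spec into the result, erroring out if two columns
    # request incompatible specs for the same feature key.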
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
def _embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""`_DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `_CategoricalColumn` created by any of the
  `categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_column: A `_CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as an
      example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`tf.compat.v1.truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
`_DenseColumn` that converts from sparse input.
Raises:
    ValueError: if `dimension` is not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
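    # With stddev 1/sqrt(dimension), the expected norm of each embedding row
    # is roughly independent of the embedding dimension.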
embedding_shape = categorical_column._num_buckets, dimension # pylint: disable=protected-access
def _creator(weight_collections, scope):
embedding_column_layer = _EmbeddingColumnLayer(
embedding_shape=embedding_shape,
initializer=initializer,
weight_collections=weight_collections,
trainable=trainable,
name='embedding_column_layer')
return embedding_column_layer(None, scope=scope) # pylint: disable=not-callable
return _EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
layer_creator=_creator,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
def _numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
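  A `normalizer_fn` can be attached at construction time. A minimal sketch (the
  feature name and scaling constants here are purely illustrative):
  ```python
  # Rescales raw 'age' values before they reach the model.
  age = numeric_column('age', normalizer_fn=lambda x: (x - 18.0) / 50.0)
  ```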
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
    shape: An iterable of integers specifying the shape of the `Tensor`. A
      single integer may be given, meaning a one-dimensional `Tensor` with the
      given width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.io.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization,
      it can be used for any kind of TensorFlow transformation.
Returns:
A `_NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = fc_utils.check_default_value(
shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
fc_utils.assert_key_is_string(key)
return _NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
  input tensor = [[-5, 10000],
                  [150, 10],
                  [5, 100]]
```
then the output will be
```python
  output = [[0, 3],
            [3, 2],
            [1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
  price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50000)
columns = [price_x_keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `_BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, _NumericColumn):
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if (not boundaries or
not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
raise ValueError('boundaries must be a sorted list.')
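  # Enforce strictly increasing boundaries; equal adjacent boundaries would
  # produce an empty bucket.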
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return _BucketizedColumn(source_column, tuple(boundaries))
def _categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
output_id = Hash(input_feature_string) % bucket_size for string type input.
For int type input, the value is converted to its string representation first
and then hashed by the same formula.
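  A minimal sketch of this formula (assuming the fast fingerprint hash exposed
  as `tf.strings.to_hash_bucket_fast`, which matches the hashing op this column
  uses internally):
  ```python
  import tensorflow as tf
  # Each resulting id equals Hash(value) % 10000 and lies in [0, 10000).
  ids = tf.strings.to_hash_bucket_fast(["sports", "news"], num_buckets=10000)
  ```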
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
  keywords = categorical_column_with_hash_bucket("keywords", 10000)
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_HashedCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return _HashedCategoricalColumn(key, hash_bucket_size, dtype)
def _categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `_CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to their line numbers. All other values are hashed and assigned
  an ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If `None`, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return _VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
def _categorical_column_with_vocabulary_list(key,
vocabulary_list,
dtype=None,
default_value=-1,
num_oov_buckets=0):
"""A `_CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return _VocabularyListCategoricalColumn(
key=key, vocabulary_list=tuple(vocabulary_list), dtype=dtype,
default_value=default_value, num_oov_buckets=num_oov_buckets)
def _categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `_CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
  you want to use the input value itself as the categorical ID. Values outside
  this range are mapped to `default_value` if it is specified; otherwise the
  column's graph operations will fail.
  Typically, this is used for contiguous ranges of integer indexes, but it
  doesn't have to be. This might be inefficient, however, if many of the IDs
  are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
  In the following examples, each input in the range `[0, 1000000)` is used
  directly as its own categorical ID. All other inputs are assigned
  `default_value` 0. Note that a literal 0 in inputs will result in the same
  default ID.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If `None`, this column's graph operations will fail for
      out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and out-of-range inputs will be replaced with it.
Returns:
A `_CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
fc_utils.assert_key_is_string(key)
return _IdentityCategoricalColumn(
key=key, num_buckets=num_buckets, default_value=default_value)
def _indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
  - For DNN model, `indicator_column` can be used to wrap any
    `categorical_column_*` (e.g., to feed to DNN). Consider using
    `embedding_column` if the number of buckets (unique values) is large.
- For Wide (aka linear) model, `indicator_column` is the internal
representation for categorical column when passing categorical column
directly (as any element in feature_columns) to `linear_model`. See
`linear_model` for details.
```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `_IndicatorColumn`.
"""
return _IndicatorColumn(categorical_column)
def _weighted_categorical_column(categorical_column,
weight_feature_key,
dtype=dtypes.float32):
"""Applies weight values to a `_CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
Args:
categorical_column: A `_CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
    A `_CategoricalColumn` composed of two sparse features: one represents the
    IDs, the other represents the weight (value) of each ID in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return _WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
def _crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
  then the crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1000)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10000)
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
  To use a crossed column in a DNN model, you need to wrap it in an embedding
  column, as in this example:
```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `_CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
    hash_bucket_size: An int >= 1. The number of hash buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `_CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.
ValueError: If any of the keys is `_HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
  if not hash_bucket_size or hash_bucket_size < 1:
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, _CategoricalColumn)):
      raise ValueError(
          'Unsupported key type. All keys must be either a string or a '
          'categorical column (other than _HashedCategoricalColumn). '
          'Given: {}'.format(key))
if isinstance(key, _HashedCategoricalColumn):
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return _CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size,
hash_key=hash_key)
# TODO(rohanj): Clearly define semantics of this layer.
class _EmbeddingColumnLayer(base.Layer):
"""A layer that stores all the state required for a embedding column."""
def __init__(self,
embedding_shape,
initializer,
weight_collections=None,
trainable=True,
name=None,
**kwargs):
"""Constructor.
Args:
embedding_shape: Shape of the embedding variable used for lookup.
initializer: A variable initializer function to be used in embedding
variable initialization.
      weight_collections: A list of collection names to which the Variable will
        be added. Note that variables will also be added to the collections
        `tf.GraphKeys.GLOBAL_VARIABLES` and `tf.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      name: Name of the layer.
      **kwargs: Additional keyword arguments forwarded to the base `Layer`.
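    A minimal usage sketch (hedged: the vocabulary size, embedding dimension,
    and initializer below are illustrative assumptions; the layer is invoked
    with a dummy input, since `build` and `call` ignore their argument):
    ```python
    layer = _EmbeddingColumnLayer(
        embedding_shape=(10, 4),  # 10-entry vocabulary, 4-d embeddings.
        initializer=init_ops.zeros_initializer())
    embedding_weights = layer(None)  # Builds, then returns the variable.
    ```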
"""
super(_EmbeddingColumnLayer, self).__init__(
trainable=trainable, name=name, **kwargs)
self._embedding_shape = embedding_shape
self._initializer = initializer
self._weight_collections = weight_collections
def set_weight_collections(self, weight_collections):
"""Sets the weight collections for the layer.
Args:
weight_collections: A list of collection names to which the Variable will
be added.
"""
self._weight_collections = weight_collections
def build(self, _):
self._embedding_weight_var = self.add_variable(
name='embedding_weights',
shape=self._embedding_shape,
dtype=dtypes.float32,
initializer=self._initializer,
trainable=self.trainable)
if self._weight_collections and not context.executing_eagerly():
_add_to_collections(self._embedding_weight_var, self._weight_collections)
self.built = True
def call(self, _):
return self._embedding_weight_var
@six.add_metaclass(abc.ABCMeta)
class _FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish the concept of a feature family and a specific binary feature
within a family, we refer to a feature family like "country" as a feature
column. Following is an example feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
  In this example the value of the feature is "US" and "country" refers to the
  column of the feature.
  This class is abstract. Users should not create instances of it directly.
"""
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming and for name_scope."""
pass
@property
def _var_scope_name(self):
"""Returns string. Used for variable_scope. Defaults to self.name."""
return self.name
@abc.abstractmethod
def _transform_feature(self, inputs):
"""Returns intermediate representation (usually a `Tensor`).
Uses `inputs` to create an intermediate representation (usually a `Tensor`)
that other feature columns can use.
Example usage of `inputs`:
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs will
be used as follows:
```python
raw_tensor = inputs.get('raw')
fc_tensor = inputs.get(input_fc)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def _parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
    It is used to generate a parsing spec for `tf.io.parse_example`. The
    returned spec is a dict from keys ('string') to `VarLenFeature`,
    `FixedLenFeature`, and other supported objects. Please check the
    documentation of `tf.io.parse_example` for all supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`_FeatureColumn` (input_fc). One possible implementation of
_parse_example_spec is as follows:
```python
spec = {'raw': tf.io.FixedLenFeature(...)}
spec.update(input_fc._parse_example_spec)
return spec
```
"""
pass
def _reset_config(self):
"""Resets the configuration in the column.
    Some feature columns, e.g. embedding or shared embedding columns, might
    have state that occasionally needs to be reset. Use this method in that
    scenario.
"""
class _DenseColumn(_FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def _variable_shape(self):
"""`TensorShape` of `_get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns a `Tensor`.
    The output of this function will be used by model-builder functions. For
    example, the pseudo code of `input_layer` looks like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs, axis=1)
```
Args:
inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
"""
pass
def _create_weighted_sum(column,
builder,
units,
sparse_combiner,
weight_collections,
trainable,
weight_var=None):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, _CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
builder=builder,
units=units,
sparse_combiner=sparse_combiner,
weight_collections=weight_collections,
trainable=trainable,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
builder=builder,
units=units,
weight_collections=weight_collections,
trainable=trainable,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column,
builder,
units,
weight_collections,
trainable,
weight_var=None):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column._get_dense_tensor( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
if weight_var is not None:
weight = weight_var
else:
weight = variable_scope.get_variable(
name='weights',
shape=[num_elements, units],
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return math_ops.matmul(tensor, weight, name='weighted_sum')
class _CategoricalColumn(_FeatureColumn):
"""Represents a categorical feature.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
  A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ['id_tensor', 'weight_tensor'])
@abc.abstractproperty
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `id_tensor`. The expected `SparseTensor` is the same
    as the parsing output of a `VarLenFeature`, which is a ragged matrix.
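    For a column without per-id weights, a typical implementation simply wraps
    the transformed tensor (a sketch mirroring the concrete columns in this
    module):
    ```python
    def _get_sparse_tensors(self, inputs, weight_collections=None,
                            trainable=None):
      return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
    ```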
Args:
inputs: A `LazyBuilder` as a cache to get input tensors required to
create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        are created) are added.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.compat.v1.get_variable`).
"""
pass
def _create_categorical_column_weighted_sum(column,
builder,
units,
sparse_combiner,
weight_collections,
trainable,
weight_var=None):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Create a weighted sum of a categorical column for linear_model.
  Note to maintainer: As an implementation detail, the weighted sum is
  implemented via `embedding_lookup_sparse` for efficiency. Mathematically,
  they are the same.
  To be specific, conceptually a categorical column can be treated as a
  multi-hot vector. Say:
```python
x = [0 0 1] # categorical column input
w = [a b c] # weights
```
  The weighted sum is `c` in this case, which is the same as `w[2]`.
Another example is
```python
x = [0 1 1] # categorical column input
w = [a b c] # weights
```
  The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
  In both cases, we can implement the weighted sum via
  `embedding_lookup_sparse` with `sparse_combiner = "sum"`.
"""
sparse_tensors = column._get_sparse_tensors( # pylint: disable=protected-access
builder,
weight_collections=weight_collections,
trainable=trainable)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
if weight_var is not None:
weight = weight_var
else:
weight = variable_scope.get_variable(
name='weights',
shape=(column._num_buckets, units), # pylint: disable=protected-access
initializer=init_ops.zeros_initializer(),
trainable=trainable,
collections=weight_collections)
return embedding_ops.safe_embedding_lookup_sparse(
weight,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class _SequenceDenseColumn(_FeatureColumn):
"""Represents dense sequence data."""
TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
'TensorSequenceLengthPair', ['dense_tensor', 'sequence_length'])
@abc.abstractmethod
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
"""Returns a `TensorSequenceLengthPair`."""
pass
class _LazyBuilder(object):
"""Handles caching of transformations while building the model.
`_FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
  Some features may be used in more than one place. For example, one can use a
  bucketized feature both by itself and inside a cross. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`_LazyBuilder` caches all previously transformed columns.
Example:
We're trying to use the following `_FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, "keywords"], ...)
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords])
```
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `_LazyBuilder` eliminates this duplication.
"""
def __init__(self, features):
"""Creates a `_LazyBuilder`.
Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to one of those via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `_FeatureColumn` key
        means that this `Tensor` is the output of an existing `_FeatureColumn`
        which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`_FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `_FeatureColumn` is asked to provide its
transformed output, which is then cached.
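    For example (a hedged sketch; `age_tensor` and the `bucketized_age` column
    are assumed to exist, with 'age' as the column's base feature):
    ```python
    builder = _LazyBuilder({'age': age_tensor})
    raw_age = builder.get('age')                 # Base feature lookup.
    buckets = builder.get(bucketized_age)        # Transformed, then cached.
    buckets_again = builder.get(bucketized_age)  # Served from the cache.
    ```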
Args:
key: a `str` or a `_FeatureColumn`.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, _FeatureColumn):
raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column._transform_feature(self) # pylint: disable=protected-access
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to (sparse) tensor and maybe expand dim.
For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if
the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will
error out as it is not supported.
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
            'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _shape_offsets(shape):
"""Returns moving offset for each dimension given shape."""
offsets = []
for dim in reversed(shape):
if offsets:
offsets.append(dim * offsets[-1])
else:
offsets.append(dim)
offsets.reverse()
return offsets
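# For example, _shape_offsets((2, 3, 4)) returns [24, 12, 4]: each entry is
# the product of that dimension and every dimension after it.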
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
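  For example (a sketch; `-1` is the default ignore value for integer input):
  ```python
  dense = tf.constant([[1, -1], [-1, 3]])
  sp = _to_sparse_input_and_drop_ignore_values(dense)
  # sp.indices == [[0, 0], [1, 1]], sp.values == [1, 3]
  ```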
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
absent from the resulting `SparseTensor`. If `None`, default value of
`dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
        # Strings are special-cased because TF converts them to numpy objects
        # by default.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
"""Normalizes the `feature_columns` input.
  This method converts `feature_columns` to a list as best it can. In
  addition, it verifies the type and other properties of `feature_columns`
  required by downstream libraries.
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc.
"""
if isinstance(feature_columns, _FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, _FeatureColumn):
      raise ValueError('Items of feature_columns must be _FeatureColumn '
                       'instances. Given (type {}): {}.'.format(
                           type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
      raise ValueError('Duplicate feature column name found for columns: {} '
                       'and {}. This usually means that these columns refer '
                       'to the same base feature. Either one must be '
                       'discarded or a duplicated but renamed item must be '
                       'inserted in the features dict.'.format(
                           column, name_to_column[column.name]))
name_to_column[column.name] = column
return feature_columns
class _NumericColumn(_DenseColumn,
collections.namedtuple('_NumericColumn', [
'key', 'shape', 'default_value', 'dtype',
'normalizer_fn'
])):
"""see `numeric_column`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of a numeric column must be a dense '
          'Tensor. SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(self.shape)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing numeric feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
collections.namedtuple('_BucketizedColumn', [
'source_column', 'boundaries'])):
"""See `bucketized_column`."""
@property
def name(self):
return '{}_bucketized'.format(self.source_column.name)
@property
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def _variable_shape(self):
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
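    # For example, with boundaries=[0., 10.] (3 buckets), a scalar input of
    # 11. has bucket index 2 and is densified below to the one-hot row
    # [0., 0., 1.].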
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
@property
def _num_buckets(self):
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = inputs.get(self)
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and offset them so they are unique across
    # dimensions. E.g. with k buckets, indices in the 2nd dimension range
    # from k to 2*k-1.
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
class _EmbeddingColumn(
_DenseColumn, _SequenceDenseColumn,
collections.namedtuple(
'_EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'layer_creator',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_embedding'.format(self.categorical_column.name)
return self._name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor_internal(self,
inputs,
weight_collections=None,
trainable=None):
"""Private method that follows the signature of _get_dense_tensor."""
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_weights = self.layer_creator(
weight_collections=weight_collections,
scope=variable_scope.get_variable_scope())
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_graph_for_variable(var):
if isinstance(var, variables.PartitionedVariable):
return list(var)[0].graph
else:
return var.graph
class _SharedEmbeddingColumn(
_DenseColumn, _SequenceDenseColumn,
collections.namedtuple(
'_SharedEmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'shared_embedding_collection_name', 'ckpt_to_load_from',
'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
"""See `embedding_column`."""
@property
def name(self):
if not hasattr(self, '_name'):
self._name = '{}_shared_embedding'.format(self.categorical_column.name)
return self._name
@property
def _var_scope_name(self):
return self.shared_embedding_collection_name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def _variable_shape(self):
if not hasattr(self, '_shape'):
self._shape = tensor_shape.vector(self.dimension)
return self._shape
def _get_dense_tensor_internal(self,
inputs,
weight_collections=None,
trainable=None):
"""Private method that follows the signature of _get_dense_tensor."""
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections=weight_collections, trainable=trainable)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
shared_embedding_collection = ops.get_collection(
self.shared_embedding_collection_name)
if shared_embedding_collection:
if len(shared_embedding_collection) > 1:
raise ValueError(
'Collection {} can only contain one variable. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(shared_embedding_collection))
embedding_weights = shared_embedding_collection[0]
if embedding_weights.get_shape() != embedding_shape:
raise ValueError(
'Shared embedding collection {} contains variable {} of '
'unexpected shape {}. Expected shape is {}. '
'Suggested fix A: Choose a unique name for this collection. '
'Suggested fix B: Do not add any variables to this collection. '
'The feature_column library already adds a variable under the '
'hood.'.format(self.shared_embedding_collection_name,
embedding_weights.name,
embedding_weights.get_shape(), embedding_shape))
else:
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
ops.add_to_collection(self.shared_embedding_collection_name,
embedding_weights)
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal( # pylint: disable=protected-access
inputs=inputs,
weight_collections=weight_collections,
trainable=trainable)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, six.integer_types):
raise TypeError('shape dimensions must be integer. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
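# For example, _check_shape(5, 'age') returns (5,) and
# _check_shape([2, 3], 'image') returns (2, 3).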
class _HashedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_HashedCategoricalColumn',
['key', 'hash_bucket_size', 'dtype'])):
"""see `categorical_column_with_hash_bucket`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
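    # Hash the (stringified) values into [0, hash_bucket_size).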
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyFileCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyFileCategoricalColumn', (
'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'dtype',
'default_value'
))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
return lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyListCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_VocabularyListCategoricalColumn', (
'key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'
))):
"""See `categorical_column_with_vocabulary_list`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
return lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _IdentityCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_IdentityCategoricalColumn', (
'key', 'num_buckets', 'default_value'
))):
"""See `categorical_column_with_identity`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values, num_buckets, data=(values, num_buckets),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values, zero, data=(values,),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.num_buckets
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _WeightedCategoricalColumn(
_CategoricalColumn,
collections.namedtuple('_WeightedCategoricalColumn', (
'categorical_column', 'weight_feature_key', 'dtype'
))):
"""See `weighted_categorical_column`."""
@property
def name(self):
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_feature(self, inputs):
weight_tensor = inputs.get(self.weight_feature_key)
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return (inputs.get(self.categorical_column), weight_tensor)
def _get_sparse_tensors(
self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
class _CrossedColumn(
_CategoricalColumn,
collections.namedtuple('_CrossedColumn',
['keys', 'hash_bucket_size', 'hash_key'])):
"""See `crossed_column`."""
@property
def name(self):
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, _FeatureColumn):
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def _parse_example_spec(self):
config = {}
for key in self.keys:
if isinstance(key, _FeatureColumn):
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
def _transform_feature(self, inputs):
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, _CategoricalColumn):
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `_CrossedColumn`.
Returns:
A list of strings or `_CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, _CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
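# For example, if `cross` was built as
#   _crossed_column([_crossed_column(['a', 'b'], 10, None), 'c'], 10, None)
# then _collect_leaf_level_keys(cross) returns ['a', 'b', 'c'].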
class _IndicatorColumn(_DenseColumn, _SequenceDenseColumn,
collections.namedtuple('_IndicatorColumn',
['categorical_column'])):
"""Represents a one-hot column for use in deep networks.
Args:
categorical_column: A `_CategoricalColumn` which is created by
`categorical_column_with_*` function.
"""
@property
def name(self):
return '{}_indicator'.format(self.categorical_column.name)
def _transform_feature(self, inputs):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
      # Use scatter_nd to merge duplicated indices if they exist, instead of
      # sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
# One hot must be float for tf.concat reasons since all other inputs to
# input_layer are float32.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def _variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
"""Returns dense `Tensor` representing feature.
Args:
inputs: A `_LazyBuilder` object to access inputs.
weight_collections: Unused `weight_collections` since no variables are
created in this function.
trainable: Unused `trainable` bool since no variables are created in
this function.
Returns:
Dense `Tensor` created within `_transform_feature`.
Raises:
ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`.
"""
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use input_layer, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'sequence_input_layer instead of input_layer. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return inputs.get(self)
def _get_sequence_dense_tensor(
self, inputs, weight_collections=None, trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(self.categorical_column, _SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use sequence_input_layer. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(
self.name, type(self.categorical_column),
self.categorical_column))
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return _SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _verify_static_batch_size_equality(tensors, columns):
"""Validates that the first dim (batch size) of all tensors are equal or None.
Args:
tensors: list of tensors to check.
columns: list of feature columns matching tensors. Will be used for error
messaging.
Raises:
    ValueError: if one of the tensors has a different batch size.
"""
  # batch_size is a tf.compat.v1.Dimension object.
expected_batch_size = None
for i in range(0, len(tensors)):
if tensors[i].shape.dims[0].value is not None:
if expected_batch_size is None:
        batch_size_column_index = i
expected_batch_size = tensors[i].shape.dims[0]
elif not expected_batch_size.is_compatible_with(tensors[i].shape.dims[0]):
raise ValueError(
'Batch size (first dimension) of each feature must be same. '
'Batch size of columns ({}, {}): ({}, {})'.format(
columns[bath_size_column_index].name, columns[i].name,
expected_batch_size, tensors[i].shape.dims[0]))
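# Illustrative sketch (added for exposition; not part of the original file):
# how the static batch-size check above behaves. The column objects are used
# only for error messaging, so hypothetical stand-ins with a `name` attribute
# suffice here.
def _example_verify_static_batch_size():
  _Col = collections.namedtuple('_Col', ['name'])  # hypothetical stand-in
  t_a = array_ops.zeros([4, 3])  # static batch size 4
  t_b = array_ops.zeros([5, 2])  # static batch size 5: incompatible
  # Raises ValueError, since static batch sizes 4 and 5 differ.
  _verify_static_batch_size_equality([t_a, t_b], [_Col('aaa'), _Col('bbb')])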
class _SequenceCategoricalColumn(
_CategoricalColumn,
collections.namedtuple(
'_SequenceCategoricalColumn', ['categorical_column'])):
"""Represents sequences of categorical data."""
@property
def name(self):
return self.categorical_column.name
@property
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def _transform_feature(self, inputs):
return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access
@property
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
id_tensor = sparse_tensors.id_tensor
weight_tensor = sparse_tensors.weight_tensor
    # Expands the third dimension, if necessary, so that embeddings are not
    # combined during the embedding lookup. If the tensor is already 3D, it is
    # left as-is.
    shape = array_ops.shape(id_tensor)
    # Compute the third dimension explicitly instead of setting it to -1, as
    # -1 does not work for dynamically shaped tensors that are zero-length at
    # runtime, which happens for empty sequences.
target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
return _CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
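# Illustrative sketch (added for exposition; not part of the original file;
# assumes this module's usual `sparse_tensor as sparse_tensor_lib` import):
# the rank-2 -> rank-3 expansion performed in _get_sparse_tensors above. A
# [batch, seq_len] id tensor becomes [batch, seq_len, 1] so that each
# timestep keeps its own row during the embedding lookup.
def _example_sequence_id_reshape():
  ids = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [1, 0], [1, 1]], values=[2, 0, 1], dense_shape=[2, 2])
  shape = array_ops.shape(ids)  # the dense_shape of a SparseTensor
  target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
  return sparse_ops.sparse_reshape(ids, target_shape)  # dense_shape [2, 2, 1]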
# ==== End of file: tensorflow/python/feature_column/feature_column.py (repo: tensorflow-master) ====
# ==== File: tensorflow/python/feature_column/__init__.py (repo: tensorflow-master) -- empty ====
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequential_feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import sequence_feature_column as sfc
from tensorflow.python.feature_column import serialization
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
def _initialized_session(config=None):
sess = session.Session(config=config)
sess.run(variables_lib.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
return sess
class SequenceFeaturesTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [2, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 14., 15., 16.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]],],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[2], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 2, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[5., 6., 14., 15., 16.], [2., 3., 14., 15., 16.]],
# feature 1, [a: 0, 0, b: 2, -], [a: 1, -, b: 0, -]
[[1., 2., 17., 18., 19.], [3., 4., 11., 12., 13.]]],
'expected_sequence_length': [2, 2]},
)
@test_util.run_in_graph_and_eager_modes
def test_embedding_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size = 3
embedding_dimension_a = 2
embedding_values_a = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
embedding_dimension_b = 3
embedding_values_b = (
(11., 12., 13.), # id 0
(14., 15., 16.), # id 1
(17., 18., 19.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a,
dimension=embedding_dimension_a,
initializer=_get_initializer(embedding_dimension_a, embedding_values_a))
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
embedding_column_b = fc.embedding_column(
categorical_column_b,
dimension=embedding_dimension_b,
initializer=_get_initializer(embedding_dimension_b, embedding_values_b))
# Test that columns are reordered alphabetically.
sequence_input_layer = sfc.SequenceFeatures(
[embedding_column_b, embedding_column_a])
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b,})
self.evaluate(variables_lib.global_variables_initializer())
weights = sequence_input_layer.weights
self.assertCountEqual(
('sequence_features/aaa_embedding/embedding_weights:0',
'sequence_features/bbb_embedding/embedding_weights:0'),
tuple([v.name for v in weights]))
self.assertAllEqual(embedding_values_a, self.evaluate(weights[0]))
self.assertAllEqual(embedding_values_b, self.evaluate(weights[1]))
self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
@test_util.run_in_graph_and_eager_modes
def test_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must be of '
r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures([embedding_column_a])
_, _ = sequence_input_layer({'aaa': sparse_input})
@test_util.run_in_graph_and_eager_modes
def test_shared_embedding_column(self):
with ops.Graph().as_default():
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [2, 0]
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 0),
dense_shape=(2, 2))
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 4.), # id 1
(5., 6.) # id 2
)
def _get_initializer(embedding_dimension, embedding_values):
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
return _initializer
expected_input_layer = [
# example 0, ids_a [2], ids_b [1]
[[5., 6., 3., 4.], [0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [2, 0]
[[1., 2., 5., 6.], [3., 4., 1., 2.]],
]
expected_sequence_length = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
# Test that columns are reordered alphabetically.
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_b, categorical_column_a],
dimension=embedding_dimension,
initializer=_get_initializer(embedding_dimension, embedding_values))
sequence_input_layer = sfc.SequenceFeatures(shared_embedding_columns)
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertCountEqual(
('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
with _initialized_session() as sess:
self.assertAllEqual(embedding_values,
global_vars[0].eval(session=sess))
self.assertAllEqual(expected_input_layer,
input_layer.eval(session=sess))
self.assertAllEqual(
expected_sequence_length, sequence_length.eval(session=sess))
@test_util.run_deprecated_v1
def test_shared_embedding_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence shared embedding column."""
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_shared_embedding\. categorical_column must '
r'be of type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures(shared_embedding_columns)
_, _ = sequence_input_layer({'aaa': sparse_input_a,
'bbb': sparse_input_b})
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args_a': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'sparse_input_args_b': {
# example 0, ids [1]
# example 1, ids [1, 0]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 1, 0),
'dense_shape': (2, 2)},
'expected_input_layer': [
# example 0, ids_a [2], ids_b [1]
[[0., 0., 1., 0., 1.], [0., 0., 0., 0., 0.]],
# example 1, ids_a [0, 1], ids_b [1, 0]
[[1., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'sparse_input_args_a': {
# feature 0, ids [[2], [0, 1]]
# feature 1, ids [[0, 0], [1]]
'indices': (
(0, 0, 0), (0, 1, 0), (0, 1, 1),
(1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 0, 0, 1),
'dense_shape': (2, 2, 2)},
'sparse_input_args_b': {
# feature 0, ids [[1, 1], [1]]
# feature 1, ids [[1], [0]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (1, 1, 1, 1, 0),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
# feature 0, [a: 2, -, b: 1, 1], [a: 0, 1, b: 1, -]
[[0., 0., 1., 0., 2.], [1., 1., 0., 0., 1.]],
# feature 1, [a: 0, 0, b: 1, -], [a: 1, -, b: 0, -]
[[2., 0., 0., 0., 1.], [0., 1., 0., 1., 0.]]],
'expected_sequence_length': [2, 2]},
)
@test_util.run_in_graph_and_eager_modes
def test_indicator_column(
self, sparse_input_args_a, sparse_input_args_b, expected_input_layer,
expected_sequence_length):
sparse_input_a = sparse_tensor.SparseTensorValue(**sparse_input_args_a)
sparse_input_b = sparse_tensor.SparseTensorValue(**sparse_input_args_b)
vocabulary_size_a = 3
vocabulary_size_b = 2
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size_a)
indicator_column_a = fc.indicator_column(categorical_column_a)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size_b)
indicator_column_b = fc.indicator_column(categorical_column_b)
# Test that columns are reordered alphabetically.
sequence_input_layer = sfc.SequenceFeatures(
[indicator_column_b, indicator_column_a])
input_layer, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
@test_util.run_in_graph_and_eager_modes
def test_indicator_column_with_non_sequence_categorical(self):
"""Tests that error is raised for non-sequence categorical column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc.indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must be of '
r'type SequenceCategoricalColumn to use SequenceFeatures\.'):
sequence_input_layer = sfc.SequenceFeatures([indicator_column_a])
_, _ = sequence_input_layer({'aaa': sparse_input})
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected_input_layer': [
[[0.], [1.]],
[[10.], [0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
          # feature 0, values [[20., 3.], [5.]]
          # feature 1, values [[3.], [8.]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
'values': (20., 3., 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected_input_layer': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]],
'expected_sequence_length': [2, 2]},
)
@test_util.run_in_graph_and_eager_modes
def test_numeric_column(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa')
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [0., 1., 2., 3., 4., 5., 6., 7.]
# example 1, [10., 11., 12., 13.]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_input_layer': [
# The output of numeric_column._get_dense_tensor should be flattened.
[[0., 1., 2., 3.], [4., 5., 6., 7.]],
[[10., 11., 12., 13.], [0., 0., 0., 0.]]],
'expected_sequence_length': [2, 1]},
)
@test_util.run_in_graph_and_eager_modes
def test_numeric_column_multi_dim(
self, sparse_input_args, expected_input_layer, expected_sequence_length):
"""Tests SequenceFeatures for multi-dimensional numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, sequence_length = sequence_input_layer({'aaa': sparse_input})
self.assertAllEqual(expected_input_layer, self.evaluate(input_layer))
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
@test_util.run_in_graph_and_eager_modes
def test_sequence_length_not_equal(self):
"""Tests that an error is raised when sequence lengths are not equal."""
# Input a with sequence_length = [2, 1]
sparse_input_a = sparse_tensor.SparseTensorValue(
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Input b with sequence_length = [1, 1]
sparse_input_b = sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0)),
values=(1., 10.),
dense_shape=(2, 2))
numeric_column_a = sfc.sequence_numeric_column('aaa')
numeric_column_b = sfc.sequence_numeric_column('bbb')
sequence_input_layer = sfc.SequenceFeatures(
[numeric_column_a, numeric_column_b])
with self.assertRaisesRegexp(
errors.InvalidArgumentError, r'Condition x == y did not hold.*'):
_, sequence_length = sequence_input_layer({
'aaa': sparse_input_a, 'bbb': sparse_input_b})
self.evaluate(sequence_length)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_shape': [2, 2, 4]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, values [[0., 1., 2., 3.]], [[4., 5., 6., 7.]]
# example 1, [[10., 11., 12., 13.], []]
'indices': ((0, 0, 0), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(0, 1, 0), (0, 1, 1), (0, 1, 2), (0, 1, 3),
(1, 0, 0), (1, 0, 1), (1, 0, 2), (1, 0, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 4)},
'expected_shape': [2, 2, 4]},
)
@test_util.run_in_graph_and_eager_modes
def test_static_shape_from_tensors_numeric(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
sequence_input_layer = sfc.SequenceFeatures([numeric_column])
input_layer, _ = sequence_input_layer({'aaa': sparse_input})
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected_shape': [4, 2, 3]},
{'testcase_name': '3D',
'sparse_input_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected_shape': [4, 2, 3]}
)
@test_util.run_in_graph_and_eager_modes
def test_static_shape_from_tensors_indicator(
self, sparse_input_args, expected_shape):
"""Tests that we return a known static shape when we have one."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=3)
indicator_column = fc.indicator_column(categorical_column)
sequence_input_layer = sfc.SequenceFeatures([indicator_column])
input_layer, _ = sequence_input_layer({'aaa': sparse_input})
shape = input_layer.get_shape()
self.assertEqual(shape, expected_shape)
@test_util.run_in_graph_and_eager_modes
def test_compute_output_shape(self):
price1 = sfc.sequence_numeric_column('price1', shape=2)
price2 = sfc.sequence_numeric_column('price2')
features = {
'price1': sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 0, 1],
[0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1],
[2, 0, 0], [2, 0, 1],
[3, 0, 0], [3, 0, 1]],
values=[0., 1., 10., 11., 100., 101., 200., 201., 300., 301.],
dense_shape=(4, 3, 2)),
'price2': sparse_tensor.SparseTensor(
indices=[[0, 0],
[0, 1],
[1, 0],
[2, 0],
[3, 0]],
values=[10., 11., 20., 30., 40.],
dense_shape=(4, 3))}
sequence_features = sfc.SequenceFeatures([price1, price2])
seq_input, seq_len = sequence_features(features)
self.assertEqual(
sequence_features.compute_output_shape((None, None)),
(None, None, 3))
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[[0., 1., 10.], [10., 11., 11.], [0., 0., 0.]],
[[100., 101., 20.], [0., 0., 0.], [0., 0., 0.]],
[[200., 201., 30.], [0., 0., 0.], [0., 0., 0.]],
[[300., 301., 40.], [0., 0., 0.], [0., 0., 0.]]],
self.evaluate(seq_input))
self.assertAllClose([2, 1, 1, 1], self.evaluate(seq_len))
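# Illustrative sketch (added for exposition; not part of the original file):
# the minimal pattern the tests above exercise. SequenceFeatures maps a dict
# of sparse features to a dense [batch, max_seq_len, total_dim] tensor plus
# per-example sequence lengths.
def _example_sequence_features_usage():
  price = sfc.sequence_numeric_column('price')
  layer = sfc.SequenceFeatures([price])
  features = {'price': sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]], values=[1., 2., 3.],
      dense_shape=[2, 2])}
  sequence_input, sequence_length = layer(features)
  return sequence_input, sequence_length  # shape [2, 2, 1]; lengths [2, 1]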
@test_util.run_all_in_graph_and_eager_modes
class ConcatenateContextInputTest(test.TestCase, parameterized.TestCase):
"""Tests the utility fn concatenate_context_input."""
def test_concatenate_context_input(self):
seq_input = ops.convert_to_tensor(np.arange(12).reshape(2, 3, 2))
context_input = ops.convert_to_tensor(np.arange(10).reshape(2, 5))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
input_layer = sfc.concatenate_context_input(context_input, seq_input)
expected = np.array([
[[0, 1, 0, 1, 2, 3, 4], [2, 3, 0, 1, 2, 3, 4], [4, 5, 0, 1, 2, 3, 4]],
[[6, 7, 5, 6, 7, 8, 9], [8, 9, 5, 6, 7, 8, 9], [10, 11, 5, 6, 7, 8, 9]]
], dtype=np.float32)
output = self.evaluate(input_layer)
self.assertAllEqual(expected, output)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_3',
'seq_input_arg': np.arange(100).reshape(10, 10)},
{'testcase_name': 'rank_gt_3',
'seq_input_arg': np.arange(100).reshape(5, 5, 2, 2)}
)
def test_sequence_input_throws_error(self, seq_input_arg):
seq_input = ops.convert_to_tensor(seq_input_arg)
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'sequence_input must have rank 3'):
sfc.concatenate_context_input(context_input, seq_input)
@parameterized.named_parameters(
{'testcase_name': 'rank_lt_2',
'context_input_arg': np.arange(100)},
{'testcase_name': 'rank_gt_2',
'context_input_arg': np.arange(100).reshape(5, 5, 4)}
)
def test_context_input_throws_error(self, context_input_arg):
context_input = ops.convert_to_tensor(context_input_arg)
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(ValueError, 'context_input must have rank 2'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_seq_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
context_input = math_ops.cast(context_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'sequence_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
def test_integer_context_input_throws_error(self):
seq_input = ops.convert_to_tensor(np.arange(100).reshape(5, 5, 4))
context_input = ops.convert_to_tensor(np.arange(100).reshape(10, 10))
seq_input = math_ops.cast(seq_input, dtype=dtypes.float32)
with self.assertRaisesRegexp(
TypeError, 'context_input must have dtype float32'):
sfc.concatenate_context_input(context_input, seq_input)
@test_util.run_all_in_graph_and_eager_modes
class DenseFeaturesTest(test.TestCase):
"""Tests DenseFeatures with sequence feature columns."""
def test_embedding_column(self):
"""Tests that error is raised for sequence embedding column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column_a = fc.embedding_column(
categorical_column_a, dimension=2)
with self.assertRaisesRegexp(
ValueError,
r'In embedding_column: aaa_embedding\. categorical_column must not be '
r'of type SequenceCategoricalColumn\.'):
input_layer = fc.DenseFeatures([embedding_column_a])
_ = input_layer({'aaa': sparse_input})
def test_indicator_column(self):
"""Tests that error is raised for sequence indicator column."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column_a = fc.indicator_column(categorical_column_a)
with self.assertRaisesRegexp(
ValueError,
r'In indicator_column: aaa_indicator\. categorical_column must not be '
r'of type SequenceCategoricalColumn\.'):
input_layer = fc.DenseFeatures([indicator_column_a])
_ = input_layer({'aaa': sparse_input})
def _assert_sparse_tensor_value(test_case, expected, actual):
_assert_sparse_tensor_indices_shape(test_case, expected, actual)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
def _assert_sparse_tensor_indices_shape(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
def _get_sequence_dense_tensor(column, features):
return column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), None)
def _get_sequence_dense_tensor_state(column, features):
state_manager = fc._StateManagerImpl(Layer(), trainable=True)
column.create_state(state_manager)
dense_tensor, lengths = column.get_sequence_dense_tensor(
fc.FeatureTransformationCache(features), state_manager)
return dense_tensor, lengths, state_manager
def _get_sparse_tensors(column, features):
return column.get_sparse_tensors(
fc.FeatureTransformationCache(features), None)
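# Illustrative sketch (added for exposition; not part of the original file):
# how the helpers above resolve a column against a raw features dict via a
# FeatureTransformationCache.
def _example_get_sparse_tensors():
  column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=3)
  features = {'aaa': sparse_tensor.SparseTensor(
      indices=[[0, 0], [1, 0], [1, 1]], values=[2, 0, 1],
      dense_shape=[2, 2])}
  return _get_sparse_tensors(column, features).id_tensor  # rank-3 SparseTensor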
@test_util.run_all_in_graph_and_eager_modes
class SequenceCategoricalColumnWithIdentityTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (1, 2, 0),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((1, 2, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': (6, 7, 8),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((6, 7, 8), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_identity('aaa', num_buckets=9)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
_assert_sparse_tensor_value(
self, expected, self.evaluate(id_weight_pair.id_tensor))
@test_util.run_all_in_graph_and_eager_modes
class SequenceCategoricalColumnWithHashBucketTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'stringer', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
# Ignored to avoid hash dependence in test.
'values': np.array((0, 0, 0), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_hash_bucket(
'aaa', hash_bucket_size=10)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
_assert_sparse_tensor_indices_shape(
self, expected, self.evaluate(id_weight_pair.id_tensor))
@test_util.run_all_in_graph_and_eager_modes
class SequenceCategoricalColumnWithVocabularyFileTest(
test.TestCase, parameterized.TestCase):
def _write_vocab(self, vocab_strings, file_name):
vocab_file = os.path.join(self.get_temp_dir(), file_name)
with open(vocab_file, 'w') as f:
f.write('\n'.join(vocab_strings))
return vocab_file
def setUp(self):
super(SequenceCategoricalColumnWithVocabularyFileTest, self).setUp()
vocab_strings = ['omar', 'stringer', 'marlo']
self._wire_vocabulary_file_name = self._write_vocab(vocab_strings,
'wire_vocabulary.txt')
self._wire_vocabulary_size = 3
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self, expected, self.evaluate(id_weight_pair.id_tensor))
def test_get_sparse_tensors_dynamic_zero_length(self):
"""Tests _get_sparse_tensors with a dynamic sequence length."""
with ops.Graph().as_default():
inputs = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 2)), values=[], dense_shape=(2, 0))
expected = sparse_tensor.SparseTensorValue(
indices=np.zeros((0, 3)),
values=np.array((), dtype=np.int64),
dense_shape=(2, 0, 1))
column = sfc.sequence_categorical_column_with_vocabulary_file(
key='aaa',
vocabulary_file=self._wire_vocabulary_file_name,
vocabulary_size=self._wire_vocabulary_size)
input_placeholder_shape = list(inputs.dense_shape)
# Make second dimension (sequence length) dynamic.
input_placeholder_shape[1] = None
input_placeholder = array_ops.sparse_placeholder(
dtypes.string, shape=input_placeholder_shape)
id_weight_pair = _get_sparse_tensors(column, {'aaa': input_placeholder})
self.assertIsNone(id_weight_pair.weight_tensor)
with _initialized_session() as sess:
result = id_weight_pair.id_tensor.eval(
session=sess, feed_dict={input_placeholder: inputs})
_assert_sparse_tensor_value(
self, expected, result)
@test_util.run_all_in_graph_and_eager_modes
class SequenceCategoricalColumnWithVocabularyListTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
'indices': ((0, 0), (1, 0), (1, 1)),
'values': ('marlo', 'skywalker', 'omar'),
'dense_shape': (2, 2)},
'expected_args': {
'indices': ((0, 0, 0), (1, 0, 0), (1, 1, 0)),
'values': np.array((2, -1, 0), dtype=np.int64),
'dense_shape': (2, 2, 1)}},
{'testcase_name': '3D',
'inputs_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': ('omar', 'skywalker', 'marlo'),
'dense_shape': (2, 2, 2)},
'expected_args': {
'indices': ((0, 0, 2), (1, 0, 0), (1, 2, 0)),
'values': np.array((0, -1, 2), dtype=np.int64),
'dense_shape': (2, 2, 2)}}
)
def test_get_sparse_tensors(self, inputs_args, expected_args):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
expected = sparse_tensor.SparseTensorValue(**expected_args)
column = sfc.sequence_categorical_column_with_vocabulary_list(
key='aaa',
vocabulary_list=('omar', 'stringer', 'marlo'))
id_weight_pair = _get_sparse_tensors(column, {'aaa': inputs})
self.assertIsNone(id_weight_pair.weight_tensor)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
_assert_sparse_tensor_value(
self, expected, self.evaluate(id_weight_pair.id_tensor))
@test_util.run_all_in_graph_and_eager_modes
class SequenceEmbeddingColumnTest(
test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [0, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 0, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[7., 11.], [0., 0.]],
# example 1, ids [[0, 1], [2]]
[[2, 3.5], [7., 11.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [[1], [0, 2]]
[[3., 5.], [4., 6.5]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup, _, state_manager = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
variables = state_manager._layer.weights
self.evaluate(variables_lib.global_variables_initializer())
self.assertCountEqual(
('embedding_weights:0',), tuple([v.name for v in variables]))
self.assertAllEqual(embedding_values, self.evaluate(variables[0]))
self.assertAllEqual(expected, self.evaluate(embedding_lookup))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length, _ = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': inputs})
sequence_length = self.evaluate(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
embedding_column = fc.embedding_column(
categorical_column, dimension=2)
_, sequence_length, _ = _get_sequence_dense_tensor_state(
embedding_column, {'aaa': sparse_input})
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
class SequenceSharedEmbeddingColumnTest(test.TestCase):
@test_util.run_deprecated_v1
def test_get_sequence_dense_tensor(self):
vocabulary_size = 3
embedding_dimension = 2
embedding_values = (
(1., 2.), # id 0
(3., 5.), # id 1
(7., 11.) # id 2
)
def _initializer(shape, dtype, partition_info):
self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
self.assertEqual(dtypes.float32, dtype)
self.assertIsNone(partition_info)
return embedding_values
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
indices=((0, 0), (1, 0), (1, 1), (3, 0)),
values=(2, 0, 1, 1),
dense_shape=(4, 2))
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [1]
# example 1, ids [0, 2]
# example 2, ids [0]
# example 3, ids []
indices=((0, 0), (1, 0), (1, 1), (2, 0)),
values=(1, 0, 2, 0),
dense_shape=(4, 2))
expected_lookups_a = [
# example 0, ids [2]
[[7., 11.], [0., 0.]],
# example 1, ids [0, 1]
[[1., 2.], [3., 5.]],
# example 2, ids []
[[0., 0.], [0., 0.]],
# example 3, ids [1]
[[3., 5.], [0., 0.]],
]
expected_lookups_b = [
# example 0, ids [1]
[[3., 5.], [0., 0.]],
# example 1, ids [0, 2]
[[1., 2.], [7., 11.]],
# example 2, ids [0]
[[1., 2.], [0., 0.]],
# example 3, ids []
[[0., 0.], [0., 0.]],
]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b],
dimension=embedding_dimension,
initializer=_initializer)
embedding_lookup_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[0]
embedding_lookup_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[0]
self.evaluate(variables_lib.global_variables_initializer())
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertItemsEqual(('aaa_bbb_shared_embedding:0',),
tuple([v.name for v in global_vars]))
self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
self.assertAllEqual(
expected_lookups_a, self.evaluate(embedding_lookup_a))
self.assertAllEqual(expected_lookups_b, self.evaluate(embedding_lookup_b))
def test_sequence_length(self):
with ops.Graph().as_default():
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids [0, 1]
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 1),
dense_shape=(2, 2))
expected_sequence_length_a = [1, 2]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [0, 2]
# example 1, ids [1]
indices=((0, 0), (0, 1), (1, 0)),
values=(0, 2, 1),
dense_shape=(2, 2))
expected_sequence_length_b = [2, 1]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with _initialized_session() as sess:
sequence_length_a = sess.run(sequence_length_a)
self.assertAllEqual(expected_sequence_length_a, sequence_length_a)
self.assertEqual(np.int64, sequence_length_a.dtype)
sequence_length_b = sess.run(sequence_length_b)
self.assertAllEqual(expected_sequence_length_b, sequence_length_b)
self.assertEqual(np.int64, sequence_length_b.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
with ops.Graph().as_default():
vocabulary_size = 3
sparse_input_a = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length_a = [0, 1, 2, 0, 1, 0]
categorical_column_a = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
sparse_input_b = sparse_tensor.SparseTensorValue(
# example 0, ids [2]
# example 1, ids []
# example 2, ids []
# example 3, ids []
# example 4, ids [1]
# example 5, ids [0, 1]
indices=((0, 0), (4, 0), (5, 0), (5, 1)),
values=(2, 1, 0, 1),
dense_shape=(6, 2))
expected_sequence_length_b = [1, 0, 0, 0, 1, 2]
categorical_column_b = sfc.sequence_categorical_column_with_identity(
key='bbb', num_buckets=vocabulary_size)
shared_embedding_columns = fc.shared_embedding_columns_v2(
[categorical_column_a, categorical_column_b], dimension=2)
sequence_length_a = _get_sequence_dense_tensor(
shared_embedding_columns[0], {'aaa': sparse_input_a})[1]
sequence_length_b = _get_sequence_dense_tensor(
shared_embedding_columns[1], {'bbb': sparse_input_b})[1]
with _initialized_session() as sess:
self.assertAllEqual(
expected_sequence_length_a, sequence_length_a.eval(session=sess))
self.assertAllEqual(
expected_sequence_length_b, sequence_length_b.eval(session=sess))
@test_util.run_all_in_graph_and_eager_modes
class SequenceIndicatorColumnTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
# example 2, ids []
# example 3, ids [1]
'indices': ((0, 0), (1, 0), (1, 1), (3, 0)),
'values': (2, 0, 1, 1),
'dense_shape': (4, 2)},
'expected': [
# example 0, ids [2]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [0, 1]
[[1., 0., 0.], [0., 1., 0.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [1]
[[0., 1., 0.], [0., 0., 0.]]]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
# example 2, ids []
# example 3, ids [[1], [2, 2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 1, 0), (3, 1, 1)),
'values': (2, 0, 1, 2, 1, 2, 2),
'dense_shape': (4, 2, 2)},
'expected': [
# example 0, ids [[2]]
[[0., 0., 1.], [0., 0., 0.]],
# example 1, ids [[0, 1], [2]]
[[1., 1., 0.], [0., 0., 1.]],
# example 2, ids []
[[0., 0., 0.], [0., 0., 0.]],
# example 3, ids [[1], [2, 2]]
[[0., 1., 0.], [0., 0., 2.]]]}
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
indicator_tensor, _ = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
self.assertAllEqual(expected, self.evaluate(indicator_tensor))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2, 0, 1),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2]},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2, 0, 1, 2),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2]}
)
def test_sequence_length(self, inputs_args, expected_sequence_length):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
vocabulary_size = 3
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': inputs})
sequence_length = self.evaluate(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
vocabulary_size = 3
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, ids []
# example 1, ids [2]
# example 2, ids [0, 1]
# example 3, ids []
# example 4, ids [1]
# example 5, ids []
indices=((1, 0), (2, 0), (2, 1), (4, 0)),
values=(2, 0, 1, 1),
dense_shape=(6, 2))
expected_sequence_length = [0, 1, 2, 0, 1, 0]
categorical_column = sfc.sequence_categorical_column_with_identity(
key='aaa', num_buckets=vocabulary_size)
indicator_column = fc.indicator_column(categorical_column)
_, sequence_length = _get_sequence_dense_tensor(
indicator_column, {'aaa': sparse_input})
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
@test_util.run_all_in_graph_and_eager_modes
class SequenceNumericColumnTest(test.TestCase, parameterized.TestCase):
def test_defaults(self):
a = sfc.sequence_numeric_column('aaa')
self.assertEqual('aaa', a.key)
self.assertEqual('aaa', a.name)
self.assertEqual((1,), a.shape)
self.assertEqual(0., a.default_value)
self.assertEqual(dtypes.float32, a.dtype)
self.assertIsNone(a.normalizer_fn)
def test_shape_saved_as_tuple(self):
a = sfc.sequence_numeric_column('aaa', shape=[1, 2])
self.assertEqual((1, 2), a.shape)
def test_shape_must_be_positive_integer(self):
with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
sfc.sequence_numeric_column('aaa', shape=[1.0])
with self.assertRaisesRegexp(
ValueError, 'shape dimensions must be greater than 0'):
sfc.sequence_numeric_column('aaa', shape=[0])
def test_dtype_is_convertible_to_float(self):
with self.assertRaisesRegexp(
ValueError, 'dtype must be convertible to float'):
sfc.sequence_numeric_column('aaa', dtype=dtypes.string)
def test_normalizer_fn_must_be_callable(self):
with self.assertRaisesRegexp(TypeError, 'must be a callable'):
sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable')
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, values [0., 1]
# example 1, [10.]
'indices': ((0, 0), (0, 1), (1, 0)),
'values': (0., 1., 10.),
'dense_shape': (2, 2)},
'expected': [
[[0.], [1.]],
[[10.], [0.]]]},
{'testcase_name': '3D',
'inputs_args': {
          # feature 0, values [[20., 3.], [5.]]
          # feature 1, values [[3.], [8.]]
'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)),
          'values': (20., 3., 5., 3., 8.),
'dense_shape': (2, 2, 2)},
'expected': [
[[20.], [3.], [5.], [0.]],
[[3.], [0.], [8.], [0.]]]},
)
def test_get_sequence_dense_tensor(self, inputs_args, expected):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa')
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
self.assertAllEqual(expected, self.evaluate(dense_tensor))
def test_get_sequence_dense_tensor_with_normalizer_fn(self):
def _increment_two(input_sparse_tensor):
return sparse_ops.sparse_add(
input_sparse_tensor,
sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2))
)
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values [[0.], [1]]
# example 1, [[10.]]
indices=((0, 0), (0, 1), (1, 0)),
values=(0., 1., 10.),
dense_shape=(2, 2))
# Before _increment_two:
# [[0.], [1.]],
# [[10.], [0.]],
# After _increment_two:
# [[2.], [1.]],
# [[10.], [2.]],
expected_dense_tensor = [
[[2.], [1.]],
[[10.], [2.]],
]
numeric_column = sfc.sequence_numeric_column(
'aaa', normalizer_fn=_increment_two)
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
self.assertAllEqual(
expected_dense_tensor, self.evaluate(dense_tensor))
@parameterized.named_parameters(
{'testcase_name': '2D',
'sparse_input_args': {
# example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]
# example 1, [[[10., 11.], [12., 13.]]]
'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6),
(0, 7), (1, 0), (1, 1), (1, 2), (1, 3)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 8)},
'expected_dense_tensor': [
[[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]],
[[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]},
{'testcase_name': '3D',
'sparse_input_args': {
'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6),
(0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6),
(1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)),
'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.),
'dense_shape': (2, 2, 8)},
'expected_dense_tensor': [
[[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]],
[[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]],
[[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]],
[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]},
)
def test_get_dense_tensor_multi_dim(
self, sparse_input_args, expected_dense_tensor):
"""Tests get_sequence_dense_tensor for multi-dim numeric_column."""
sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2))
dense_tensor, _ = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
self.assertAllEqual(
expected_dense_tensor, self.evaluate(dense_tensor))
@parameterized.named_parameters(
{'testcase_name': '2D',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '3D',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (1,)},
{'testcase_name': '2D_with_shape',
'inputs_args': {
# example 0, ids [2]
# example 1, ids [0, 1]
'indices': ((0, 0), (1, 0), (1, 1)),
'values': (2., 0., 1.),
'dense_shape': (2, 2)},
'expected_sequence_length': [1, 1],
'shape': (2,)},
{'testcase_name': '3D_with_shape',
'inputs_args': {
# example 0, ids [[2]]
# example 1, ids [[0, 1], [2]]
'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
'values': (2., 0., 1., 2.),
'dense_shape': (2, 2, 2)},
'expected_sequence_length': [1, 2],
'shape': (2,)},
)
def test_sequence_length(self, inputs_args, expected_sequence_length, shape):
inputs = sparse_tensor.SparseTensorValue(**inputs_args)
numeric_column = sfc.sequence_numeric_column('aaa', shape=shape)
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': inputs})
sequence_length = self.evaluate(sequence_length)
self.assertAllEqual(expected_sequence_length, sequence_length)
self.assertEqual(np.int64, sequence_length.dtype)
def test_sequence_length_with_empty_rows(self):
"""Tests _sequence_length when some examples do not have ids."""
sparse_input = sparse_tensor.SparseTensorValue(
# example 0, values []
# example 1, values [[0.], [1.]]
# example 2, [[2.]]
# example 3, values []
# example 4, [[3.]]
# example 5, values []
indices=((1, 0), (1, 1), (2, 0), (4, 0)),
values=(0., 1., 2., 3.),
dense_shape=(6, 2))
expected_sequence_length = [0, 2, 1, 0, 1, 0]
numeric_column = sfc.sequence_numeric_column('aaa')
_, sequence_length = _get_sequence_dense_tensor(
numeric_column, {'aaa': sparse_input})
self.assertAllEqual(
expected_sequence_length, self.evaluate(sequence_length))
def test_serialization(self):
"""Tests that column can be serialized."""
def _custom_fn(input_tensor):
return input_tensor + 42
column = sfc.sequence_numeric_column(
key='my-key', shape=(2,), default_value=3, dtype=dtypes.int32,
normalizer_fn=_custom_fn)
configs = serialization.serialize_feature_column(column)
column = serialization.deserialize_feature_column(
configs, custom_objects={_custom_fn.__name__: _custom_fn})
self.assertEqual(column.key, 'my-key')
self.assertEqual(column.shape, (2,))
self.assertEqual(column.default_value, 3)
self.assertEqual(column.normalizer_fn(3), 45)
with self.assertRaisesRegex(ValueError,
'Instance: 0 is not a FeatureColumn'):
serialization.serialize_feature_column(int())
def test_parents(self):
"""Tests parents attribute of column."""
column = sfc.sequence_numeric_column(key='my-key')
self.assertEqual(column.parents, ['my-key'])
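# Illustrative sketch (added for exposition; not part of the original file):
# for sequence_numeric_column, sequence length is the number of values per
# example divided by the elements per step (prod(shape)). Two scalar values
# yield length 2 with shape=(1,) but length 1 with shape=(2,).
def _example_numeric_sequence_length():
  sp = sparse_tensor.SparseTensorValue(
      indices=((0, 0), (0, 1)), values=(5., 7.), dense_shape=(1, 2))
  column = sfc.sequence_numeric_column('aaa', shape=(2,))
  _, sequence_length = _get_sequence_dense_tensor(column, {'aaa': sp})
  return sequence_length  # evaluates to [1]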
if __name__ == '__main__':
test.main()
# ==== End of file: tensorflow/python/feature_column/sequence_feature_column_test.py (repo: tensorflow-master) ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn abstraction.
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
1. Feature type:
* Continuous features can be represented by `numeric_column`.
* Categorical features can be represented by any `categorical_column_with_*`
column:
- `categorical_column_with_vocabulary_list`
- `categorical_column_with_vocabulary_file`
- `categorical_column_with_hash_bucket`
- `categorical_column_with_identity`
- `weighted_categorical_column`
2. Model type:
* Deep neural network models (`DNNClassifier`, `DNNRegressor`).
Continuous features can be directly fed into deep neural network models.
age_column = numeric_column("age")
    To feed sparse features into DNN models, wrap the column with
    `embedding_column` or `indicator_column`. `indicator_column` is recommended
    for features with only a few possible values. For features with many
    possible values, `embedding_column` is recommended to keep the model size
    manageable.
embedded_dept_column = embedding_column(
categorical_column_with_vocabulary_list(
"department", ["math", "philosophy", ...]), dimension=10)
* Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`).
Sparse features can be fed directly into linear models. They behave like an
indicator column but with an efficient implementation.
dept_column = categorical_column_with_vocabulary_list("department",
["math", "philosophy", "english"])
It is recommended that continuous features be bucketized before being
fed into linear models.
bucketized_age_column = bucketized_column(
source_column=age_column,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
Sparse features can be crossed (also known as conjuncted or combined) in
order to form non-linearities, and then fed into linear models.
cross_dept_age_column = crossed_column(
columns=["department", bucketized_age_column],
hash_bucket_size=1000)
Example of building canned `Estimator`s using FeatureColumns:
```python
# Define features and transformations
deep_feature_columns = [age_column, embedded_dept_column]
wide_feature_columns = [dept_column, bucketized_age_column,
cross_dept_age_column]
# Build deep model
estimator = DNNClassifier(
feature_columns=deep_feature_columns,
hidden_units=[500, 250, 50])
estimator.train(...)
# Or build a wide model
estimator = LinearClassifier(
feature_columns=wide_feature_columns)
estimator.train(...)
# Or build a wide and deep model!
estimator = DNNLinearCombinedClassifier(
linear_feature_columns=wide_feature_columns,
dnn_feature_columns=deep_feature_columns,
dnn_hidden_units=[500, 250, 50])
estimator.train(...)
```
FeatureColumns can also be transformed into a generic input layer for
custom models using `input_layer`.
Example of building a model using FeatureColumns; this can be used in a
`model_fn`, which is given to the `tf.estimator.Estimator`:
```python
# Building model via layers
deep_feature_columns = [age_column, embedded_dept_column]
columns_to_tensor = parse_feature_columns_from_examples(
serialized=my_data,
feature_columns=deep_feature_columns)
first_layer = input_layer(
features=columns_to_tensor,
feature_columns=deep_feature_columns)
second_layer = fully_connected(first_layer, ...)
```
NOTE: Functions prefixed with "_" indicate experimental or private parts of
the API subject to change, and should not be relied upon!
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import math
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
# TODO(b/118385027): Dependency on keras can be problematic if Keras moves out
# of the main repo.
from tensorflow.python.keras import initializers
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
_FEATURE_COLUMN_DEPRECATION_DATE = None
_FEATURE_COLUMN_DEPRECATION = ('The old _FeatureColumn APIs are being '
'deprecated. Please use the new FeatureColumn '
'APIs instead.')
class StateManager(object):
"""Manages the state associated with FeatureColumns.
Some `FeatureColumn`s create variables or resources to assist their
computation. The `StateManager` is responsible for creating and storing these
objects since `FeatureColumn`s are supposed to be stateless configuration
only.
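For illustration, a minimal dict-backed manager might look like the
following (a sketch only; `_StateManagerImpl` below is the implementation
actually used by the feature layers):
```python
# Hypothetical manager that keeps variables keyed by (feature_column, name).
class _DictStateManager(StateManager):

  def __init__(self):
    self._vars = {}

  def create_variable(self, feature_column, name, shape, dtype=None,
                      trainable=True, use_resource=True, initializer=None):
    var = variable_scope.get_variable(
        name, shape=shape, dtype=dtype, trainable=trainable,
        use_resource=use_resource, initializer=initializer)
    self._vars[(feature_column, name)] = var
    return var

  def get_variable(self, feature_column, name):
    return self._vars[(feature_column, name)]
```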
"""
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
"""Creates a new variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
trainable: Whether this variable is trainable or not.
use_resource: If true, we use resource variables. Otherwise we use
RefVariable.
initializer: initializer instance (callable).
Returns:
The created variable.
"""
del feature_column, name, shape, dtype, trainable, use_resource, initializer
raise NotImplementedError('StateManager.create_variable')
def add_variable(self, feature_column, var):
"""Adds an existing variable to the state.
Args:
feature_column: A `FeatureColumn` object to associate this variable with.
var: The variable.
"""
del feature_column, var
raise NotImplementedError('StateManager.add_variable')
def get_variable(self, feature_column, name):
"""Returns an existing variable.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: variable name.
"""
del feature_column, name
raise NotImplementedError('StateManager.get_variable')
def add_resource(self, feature_column, name, resource):
"""Creates a new resource.
Resources can be things such as tables etc.
Args:
feature_column: A `FeatureColumn` object this resource corresponds to.
name: Name of the resource.
resource: The resource.
Returns:
The created resource.
"""
del feature_column, name, resource
raise NotImplementedError('StateManager.add_resource')
def get_resource(self, feature_column, name):
"""Returns an already created resource.
Resources can be things such as lookup tables.
Args:
feature_column: A `FeatureColumn` object this variable corresponds to.
name: Name of the resource.
"""
del feature_column, name
raise NotImplementedError('StateManager.get_resource')
class _StateManagerImpl(StateManager):
"""Manages the state of DenseFeatures and LinearLayer."""
def __init__(self, layer, trainable):
"""Creates an _StateManagerImpl object.
Args:
layer: The input layer this state manager is associated with.
trainable: Whether by default, variables created are trainable or not.
"""
self._trainable = trainable
self._layer = layer
self._cols_to_vars_map = collections.defaultdict(lambda: {})
# TODO(vbardiovsky): Make sure the resources are tracked by moving them to
# the layer (inheriting from AutoTrackable), e.g.:
# self._layer._resources_map = data_structures.Mapping()
self._cols_to_resources_map = collections.defaultdict(lambda: {})
def create_variable(self,
feature_column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None):
if name in self._cols_to_vars_map[feature_column]:
raise ValueError('Variable already exists.')
var = self._layer.add_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
trainable=self._trainable and trainable,
use_resource=use_resource,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
self._cols_to_vars_map[feature_column][name] = var
return var
def get_variable(self, feature_column, name):
if name in self._cols_to_vars_map[feature_column]:
return self._cols_to_vars_map[feature_column][name]
raise ValueError('Variable does not exist.')
def add_resource(self, feature_column, name, resource):
self._cols_to_resources_map[feature_column][name] = resource
def get_resource(self, feature_column, name):
if name in self._cols_to_resources_map[feature_column]:
return self._cols_to_resources_map[feature_column][name]
raise ValueError('Resource does not exist.')
class _BaseFeaturesLayer(Layer):
"""Base class for DenseFeatures and SequenceFeatures.
Defines common methods and helpers.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model.
expected_column_type: Expected class for provided feature columns.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` doesn't match
`expected_column_type`.
"""
def __init__(self, feature_columns, expected_column_type, trainable, name,
**kwargs):
super(_BaseFeaturesLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
self._state_manager = _StateManagerImpl(self, self.trainable)
for column in self._feature_columns:
if not isinstance(column, expected_column_type):
raise ValueError(
'Items of feature_columns must be a {}. '
'You can wrap a categorical column with an '
'embedding_column or indicator_column. Given: {}'.format(
expected_column_type, column))
def build(self, _):
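# Open nested variable scopes (layer name, then column name) so that the
# state each column creates (e.g. embedding weights) is named consistently
# and picks up any partitioning information from the enclosing scope.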
for column in self._feature_columns:
with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access
with variable_scope._pure_variable_scope(column.name): # pylint: disable=protected-access
column.create_state(self._state_manager)
super(_BaseFeaturesLayer, self).build(None)
def _target_shape(self, input_shape, num_elements):
"""Computes expected output shape of the layer or a column's dense tensor.
Args:
input_shape: Tensor or array with batch shape.
num_elements: Size of the last dimension of the output.
Returns:
Tuple with output shape.
"""
raise NotImplementedError('Calling an abstract method.')
def compute_output_shape(self, input_shape):
total_elements = 0
for column in self._feature_columns:
total_elements += column.variable_shape.num_elements()
return self._target_shape(input_shape, total_elements)
def _process_dense_tensor(self, column, tensor):
"""Reshapes the dense tensor output of a column based on expected shape.
Args:
column: A DenseColumn or SequenceDenseColumn object.
tensor: A dense tensor obtained from the same column.
Returns:
Reshaped dense tensor."""
num_elements = column.variable_shape.num_elements()
target_shape = self._target_shape(array_ops.shape(tensor), num_elements)
return array_ops.reshape(tensor, shape=target_shape)
def _verify_and_concat_tensors(self, output_tensors):
"""Verifies and concatenates the dense output of several columns."""
_verify_static_batch_size_equality(output_tensors, self._feature_columns)
return array_ops.concat(output_tensors, -1)
@keras_export('keras.layers.DenseFeatures')
class DenseFeatures(_BaseFeaturesLayer):
"""A layer that produces a dense `Tensor` based on given `feature_columns`.
Generally a single example in training data is described with FeatureColumns.
At the first layer of the model, this column-oriented data should be converted
to a single `Tensor`.
This layer can be called multiple times with different features.
Example:
```python
price = numeric_column('price')
keywords_embedded = embedding_column(
categorical_column_with_hash_bucket("keywords", 10K), dimensions=16)
columns = [price, keywords_embedded, ...]
feature_layer = DenseFeatures(columns)
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = feature_layer(features)
for units in [128, 64, 32]:
dense_tensor = tf.keras.layers.Dense(units, activation='relu')(dense_tensor)
prediction = tf.keras.layers.Dense(1)(dense_tensor)
```
"""
def __init__(self,
feature_columns,
trainable=True,
name=None,
**kwargs):
"""Constructs a DenseFeatures.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes derived
from `DenseColumn` such as `numeric_column`, `embedding_column`,
`bucketized_column`, `indicator_column`. If you have categorical
features, you can wrap them with an `embedding_column` or
`indicator_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the DenseFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is not a `DenseColumn`.
"""
super(DenseFeatures, self).__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
expected_column_type=DenseColumn,
**kwargs)
@property
def _is_feature_layer(self):
return True
def _target_shape(self, input_shape, total_elements):
return (input_shape[0], total_elements)
def call(self, features, cols_to_output_tensors=None):
"""Returns a dense tensor corresponding to the `feature_columns`.
Args:
features: A mapping from key to tensors. `FeatureColumn`s look up via
these keys. For example `numeric_column('price')` will look at 'price'
key in this dict. Values can be a `SparseTensor` or a `Tensor`, depending
on the corresponding `FeatureColumn`.
cols_to_output_tensors: If not `None`, this will be filled with a dict
mapping feature columns to output tensors created.
Returns:
A `Tensor` which represents the input layer of a model. Its shape
is (batch_size, first_layer_dimension) and its dtype is `float32`.
first_layer_dimension is determined based on given `feature_columns`.
Raises:
ValueError: If features are not a dictionary.
"""
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. '
'Instead we got: {}'.format(features))
transformation_cache = FeatureTransformationCache(features)
output_tensors = []
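# Compute the dense tensor for each column under its own name scope, then
# concatenate the results along the last axis to form the input layer.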
for column in self._feature_columns:
with ops.name_scope(column.name):
tensor = column.get_dense_tensor(transformation_cache,
self._state_manager)
processed_tensors = self._process_dense_tensor(column, tensor)
if cols_to_output_tensors is not None:
cols_to_output_tensors[column] = processed_tensors
output_tensors.append(processed_tensors)
return self._verify_and_concat_tensors(output_tensors)
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {'feature_columns': column_configs}
base_config = super( # pylint: disable=bad-super-call
DenseFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
config_cp['feature_columns'] = serialization.deserialize_feature_columns(
config['feature_columns'], custom_objects=custom_objects)
return cls(**config_cp)
class _LinearModelLayer(Layer):
"""Layer that contains logic for `LinearModel`."""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
super(_LinearModelLayer, self).__init__(
name=name, trainable=trainable, **kwargs)
self._feature_columns = _normalize_feature_columns(feature_columns)
for column in self._feature_columns:
if not isinstance(column, (DenseColumn, CategoricalColumn)):
raise ValueError(
'Items of feature_columns must be either a '
'DenseColumn or CategoricalColumn. Given: {}'.format(column))
self._units = units
self._sparse_combiner = sparse_combiner
self._state_manager = _StateManagerImpl(self, self.trainable)
self.bias = None
def build(self, _):
# We need variable scopes for now because we want the variable partitioning
# information to percolate down. We also use _pure_variable_scope's here
# since we want to open up a name_scope in the `call` method while creating
# the ops.
with variable_scope._pure_variable_scope(self.name): # pylint: disable=protected-access
for column in self._feature_columns:
with variable_scope._pure_variable_scope(column.name): # pylint: disable=protected-access
# Create the state for each feature column
column.create_state(self._state_manager)
# Create a weight variable for each column.
if isinstance(column, CategoricalColumn):
first_dim = column.num_buckets
else:
first_dim = column.variable_shape.num_elements()
self._state_manager.create_variable(
column,
name='weights',
dtype=dtypes.float32,
shape=(first_dim, self._units),
initializer=init_ops.zeros_initializer(),
trainable=self.trainable)
# Create a bias variable.
self.bias = self.add_variable(
name='bias_weights',
dtype=dtypes.float32,
shape=[self._units],
initializer=init_ops.zeros_initializer(),
trainable=self.trainable,
use_resource=True,
# TODO(rohanj): Get rid of this hack once we have a mechanism for
# specifying a default partitioner for an entire layer. In that case,
# the default getter for Layers should work.
getter=variable_scope.get_variable)
super(_LinearModelLayer, self).build(None)
def call(self, features):
if not isinstance(features, dict):
raise ValueError('We expected a dictionary here. Instead we got: {}'
.format(features))
with ops.name_scope(self.name):
transformation_cache = FeatureTransformationCache(features)
weighted_sums = []
for column in self._feature_columns:
with ops.name_scope(column.name):
# All the weights used in the linear model are owned by the state
# manager associated with this Linear Model.
weight_var = self._state_manager.get_variable(column, 'weights')
weighted_sum = _create_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=self._state_manager,
sparse_combiner=self._sparse_combiner,
weight_var=weight_var)
weighted_sums.append(weighted_sum)
_verify_static_batch_size_equality(weighted_sums, self._feature_columns)
predictions_no_bias = math_ops.add_n(
weighted_sums, name='weighted_sum_no_bias')
predictions = nn_ops.bias_add(
predictions_no_bias, self.bias, name='weighted_sum')
return predictions
def get_config(self):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
column_configs = serialization.serialize_feature_columns(
self._feature_columns)
config = {
'feature_columns': column_configs,
'units': self._units,
'sparse_combiner': self._sparse_combiner
}
base_config = super( # pylint: disable=bad-super-call
_LinearModelLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
# Import here to avoid circular imports.
from tensorflow.python.feature_column import serialization # pylint: disable=g-import-not-at-top
config_cp = config.copy()
columns = serialization.deserialize_feature_columns(
config_cp['feature_columns'], custom_objects=custom_objects)
del config_cp['feature_columns']
return cls(feature_columns=columns, **config_cp)
class LinearModel(training.Model):
"""Produces a linear prediction `Tensor` based on given `feature_columns`.
This layer generates a weighted sum based on output dimension `units`.
Weighted sum refers to logits in classification problems. It refers to the
prediction itself for linear regression problems.
Note on supported columns: `LinearModel` treats categorical columns as
`indicator_column`s. To be specific, assume the input, as a `SparseTensor`,
looks like:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
`linear_model` assigns weights for the presence of "a", "b", "c" implicitly,
just like `indicator_column`, while `input_layer` explicitly requires wrapping
each of the categorical columns with an `embedding_column` or an
`indicator_column`.
Example of usage:
```python
price = numeric_column('price')
price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
keywords = categorical_column_with_hash_bucket("keywords", 10000)
keywords_price = crossed_column(['keywords', price_buckets], ...)
columns = [price_buckets, keywords, keywords_price, ...]
linear_model = LinearModel(columns)
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
prediction = linear_model(features)
```
"""
def __init__(self,
feature_columns,
units=1,
sparse_combiner='sum',
trainable=True,
name=None,
**kwargs):
"""Constructs a LinearLayer.
Args:
feature_columns: An iterable containing the FeatureColumns to use as
inputs to your model. All items should be instances of classes derived
from `DenseColumn` or `CategoricalColumn`.
units: An integer, dimensionality of the output space. Default value is 1.
sparse_combiner: A string specifying how to reduce if a categorical column
is multivalent. Except for `numeric_column`, almost all columns passed to
`linear_model` are treated as categorical columns. Each categorical column
is combined independently. Currently "mean", "sqrtn" and "sum"
are supported, with "sum" the default for linear models. "sqrtn" often
achieves good accuracy, in particular with bag-of-words columns.
* "sum": do not normalize features in the column
* "mean": do l1 normalization on features in the column
* "sqrtn": do l2 normalization on features in the column
For example, for two features represented as the categorical columns:
```python
# Feature 1
shape = [2, 2]
{
[0, 0]: "a"
[0, 1]: "b"
[1, 0]: "c"
}
# Feature 2
shape = [2, 3]
{
[0, 0]: "d"
[1, 0]: "e"
[1, 1]: "f"
[1, 2]: "g"
}
```
with `sparse_combiner` as "mean", the linear model outputs are conceptually
```
y_0 = 1.0 / 2.0 * (w_a + w_b) + w_c + b_0
y_1 = w_d + 1.0 / 3.0 * (w_e + w_f + w_g) + b_1
```
where `y_i` is the output, `b_i` is the bias, and `w_x` is the weight
assigned to the presence of `x` in the input features.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: Name to give to the Linear Model. All variables and ops created will
be scoped by this name.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: if an item in `feature_columns` is neither a `DenseColumn`
nor `CategoricalColumn`.
"""
super(LinearModel, self).__init__(name=name, **kwargs)
self.layer = _LinearModelLayer(
feature_columns,
units,
sparse_combiner,
trainable,
name=self.name,
**kwargs)
def call(self, features):
"""Returns a `Tensor` the represents the predictions of a linear model.
Args:
features: A mapping from key to tensors. `FeatureColumn`s look up via
these keys. For example `numeric_column('price')` will look at 'price'
key in this dict. Values are `Tensor` or `SparseTensor` depending on the
corresponding `FeatureColumn`.
Returns:
A `Tensor` which represents predictions/logits of a linear model. Its
shape is (batch_size, units) and its dtype is `float32`.
Raises:
ValueError: If features are not a dictionary.
"""
return self.layer(features)
@property
def bias(self):
return self.layer.bias
def _transform_features_v2(features, feature_columns, state_manager):
"""Returns transformed features based on features columns passed in.
Note that you most likely will not need to use this function directly. Check
`input_layer` and `linear_model` first to see whether they satisfy your
use case.
Example:
```python
# Define features and transformations
crosses_a_x_b = crossed_column(
columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
price_buckets = bucketized_column(
source_column=numeric_column("price"), boundaries=[...])
columns = [crosses_a_x_b, price_buckets]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
transformed = transform_features(features=features, feature_columns=columns)
assertCountEqual(columns, transformed.keys())
```
Args:
features: A mapping from key to tensors. `FeatureColumn`s look up via these
keys. For example `numeric_column('price')` will look at 'price' key in
this dict. Values can be a `SparseTensor` or a `Tensor`, depending on the
corresponding `FeatureColumn`.
feature_columns: An iterable containing all the `FeatureColumn`s.
state_manager: A StateManager object that holds the FeatureColumn state.
Returns:
A `dict` mapping `FeatureColumn` to `Tensor` and `SparseTensor` values.
"""
feature_columns = _normalize_feature_columns(feature_columns)
outputs = {}
with ops.name_scope(
None, default_name='transform_features', values=features.values()):
transformation_cache = FeatureTransformationCache(features)
for column in feature_columns:
with ops.name_scope(None, default_name=column.name):
outputs[column] = transformation_cache.get(column, state_manager)
return outputs
@tf_export('feature_column.make_parse_example_spec', v1=[])
def make_parse_example_spec_v2(feature_columns):
"""Creates parsing spec dictionary from input feature_columns.
The returned dictionary can be used as the `features` argument to
`tf.io.parse_example`.
Typical usage example:
```python
# Define features and transformations
feature_a = categorical_column_with_vocabulary_file(...)
feature_b = numeric_column(...)
feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
feature_a_x_feature_c = crossed_column(
columns=["feature_a", feature_c_bucketized], ...)
feature_columns = set(
[feature_b, feature_c_bucketized, feature_a_x_feature_c])
features = tf.io.parse_example(
serialized=serialized_examples,
features=make_parse_example_spec(feature_columns))
```
For the above example, `make_parse_example_spec` would return the dict:
```python
{
"feature_a": parsing_ops.VarLenFeature(tf.string),
"feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
"feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
}
```
Args:
feature_columns: An iterable containing all feature columns. All items
should be instances of classes derived from `FeatureColumn`.
Returns:
A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
value.
Raises:
ValueError: If any of the given `feature_columns` is not a `FeatureColumn`
instance.
"""
result = {}
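# Merge each column's parsing spec into a single dict, raising if two
# columns request conflicting specs for the same feature key.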
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('All feature_columns must be FeatureColumn instances. '
'Given: {}'.format(column))
config = column.parse_example_spec
for key, value in six.iteritems(config):
if key in result and value != result[key]:
raise ValueError(
'feature_columns contain different parse_spec for key '
'{}. Given {} and {}'.format(key, value, result[key]))
result.update(config)
return result
@tf_export('feature_column.embedding_column')
def embedding_column(categorical_column,
dimension,
combiner='mean',
initializer=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""`DenseColumn` that converts from sparse, categorical input.
Use this when your inputs are sparse, but you want to convert them to a dense
representation (e.g., to feed to a DNN).
Inputs must be a `CategoricalColumn` created by any of the
`categorical_column_*` functions. Here is an example of using
`embedding_column` with `DNNClassifier`:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `embedding_column` with model_fn:
```python
def model_fn(features, ...):
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [embedding_column(video_id, 9),...]
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
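A custom `initializer` can also be supplied; for instance (an illustrative
sketch, the standard deviation here is arbitrary):
```python
columns = [embedding_column(
    video_id, 9,
    initializer=tf.truncated_normal_initializer(stddev=0.05))]
```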
Args:
categorical_column: A `CategoricalColumn` created by a
`categorical_column_with_*` function. This column produces the sparse IDs
that are inputs to the embedding lookup.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, embedding values are l2-normalized to this value.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
`DenseColumn` that converts from sparse input.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: If eager execution is enabled.
"""
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified. '
'Embedding of column_name: {}'.format(
categorical_column.name))
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1 / math.sqrt(dimension))
return EmbeddingColumn(
categorical_column=categorical_column,
dimension=dimension,
combiner=combiner,
initializer=initializer,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable)
@tf_export(v1=['feature_column.shared_embedding_columns'])
def shared_embedding_columns(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
`categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries in
a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and
standard deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional name of the collection where
shared embedding weights are added. If not given, a reasonable name will
be chosen based on the names of `categorical_columns`. This is also used
in `variable_scope` when creating shared embedding weights.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from which
to restore the column weights. Required if `ckpt_to_load_from` is not
`None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0._num_buckets # pylint: disable=protected-access
if not isinstance(c0, fc_old._CategoricalColumn): # pylint: disable=protected-access
raise ValueError(
'All categorical_columns must be subclasses of _CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0,
(fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)): # pylint: disable=protected-access
c0 = c0.categorical_column
for c in sorted_columns[1:]:
if isinstance(
c, (fc_old._WeightedCategoricalColumn, WeightedCategoricalColumn)): # pylint: disable=protected-access
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c._num_buckets: # pylint: disable=protected-access
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c._num_buckets)) # pylint: disable=protected-access
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
result = []
for column in categorical_columns:
result.append(
fc_old._SharedEmbeddingColumn( # pylint: disable=protected-access
categorical_column=column,
initializer=initializer,
dimension=dimension,
combiner=combiner,
shared_embedding_collection_name=shared_embedding_collection_name,
ckpt_to_load_from=ckpt_to_load_from,
tensor_name_in_ckpt=tensor_name_in_ckpt,
max_norm=max_norm,
trainable=trainable))
return result
@tf_export('feature_column.shared_embeddings', v1=[])
def shared_embedding_columns_v2(categorical_columns,
dimension,
combiner='mean',
initializer=None,
shared_embedding_collection_name=None,
ckpt_to_load_from=None,
tensor_name_in_ckpt=None,
max_norm=None,
trainable=True):
"""List of dense columns that convert from sparse, categorical input.
This is similar to `embedding_column`, except that it produces a list of
embedding columns that share the same embedding weights.
Use this when your inputs are sparse and of the same type (e.g. watched and
impression video IDs that share the same vocabulary), and you want to convert
them to a dense representation (e.g., to feed to a DNN).
Inputs must be a list of categorical columns created by any of the
`categorical_column_*` functions. They must all be of the same type and have
the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file. Some or
all columns could also be weighted_categorical_column.
Here is an example embedding of two features for a DNNClassifier model:
```python
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
estimator = tf.estimator.DNNClassifier(feature_columns=columns, ...)
label_column = ...
def input_fn():
features = tf.io.parse_example(
..., features=make_parse_example_spec(columns + [label_column]))
labels = features.pop(label_column.name)
return features, labels
estimator.train(input_fn=input_fn, steps=100)
```
Here is an example using `shared_embedding_columns` with model_fn:
```python
def model_fn(features, ...):
watched_video_id = categorical_column_with_vocabulary_file(
'watched_video_id', video_vocabulary_file, video_vocabulary_size)
impression_video_id = categorical_column_with_vocabulary_file(
'impression_video_id', video_vocabulary_file, video_vocabulary_size)
columns = shared_embedding_columns(
[watched_video_id, impression_video_id], dimension=10)
dense_tensor = input_layer(features, columns)
# Form DNN layers, calculate loss, and return EstimatorSpec.
...
```
Args:
categorical_columns: List of categorical columns created by a
`categorical_column_with_*` function. These columns produce the sparse IDs
that are inputs to the embedding lookup. All columns must be of the same
type and have the same arguments except `key`. E.g. they can be
categorical_column_with_vocabulary_file with the same vocabulary_file.
Some or all columns could also be weighted_categorical_column.
dimension: An integer specifying dimension of the embedding, must be > 0.
combiner: A string specifying how to reduce if there are multiple entries
in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
'mean' the default. 'sqrtn' often achieves good accuracy, in particular
with bag-of-words columns. Each of these can be thought of as an
example-level normalization on the column. For more information, see
`tf.embedding_lookup_sparse`.
initializer: A variable initializer function to be used in embedding
variable initialization. If not specified, defaults to
`truncated_normal_initializer` with mean `0.0` and standard
deviation `1/sqrt(dimension)`.
shared_embedding_collection_name: Optional collective name of these columns.
If not given, a reasonable name will be chosen based on the names of
`categorical_columns`.
ckpt_to_load_from: String representing checkpoint name/pattern from which to
restore column weights. Required if `tensor_name_in_ckpt` is not `None`.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
which to restore the column weights. Required if `ckpt_to_load_from` is
not `None`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is
larger than this value, before combining.
trainable: Whether or not the embedding is trainable. Default is True.
Returns:
A list of dense columns that converts from sparse input. The order of
results follows the ordering of `categorical_columns`.
Raises:
ValueError: if `dimension` not > 0.
ValueError: if any of the given `categorical_columns` is of different type
or has different arguments than the others.
ValueError: if exactly one of `ckpt_to_load_from` and `tensor_name_in_ckpt`
is specified.
ValueError: if `initializer` is specified and is not callable.
RuntimeError: if eager execution is enabled.
"""
if context.executing_eagerly():
raise RuntimeError('shared_embedding_columns are not supported when eager '
'execution is enabled.')
if (dimension is None) or (dimension < 1):
raise ValueError('Invalid dimension {}.'.format(dimension))
if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
raise ValueError('Must specify both `ckpt_to_load_from` and '
'`tensor_name_in_ckpt` or none of them.')
if (initializer is not None) and (not callable(initializer)):
raise ValueError('initializer must be callable if specified.')
if initializer is None:
initializer = init_ops.truncated_normal_initializer(
mean=0.0, stddev=1. / math.sqrt(dimension))
# Sort the columns so the default collection name is deterministic even if the
# user passes columns from an unsorted collection, such as dict.values().
sorted_columns = sorted(categorical_columns, key=lambda x: x.name)
c0 = sorted_columns[0]
num_buckets = c0.num_buckets
if not isinstance(c0, CategoricalColumn):
raise ValueError(
'All categorical_columns must be subclasses of CategoricalColumn. '
'Given: {}, of type: {}'.format(c0, type(c0)))
if isinstance(c0, WeightedCategoricalColumn):
c0 = c0.categorical_column
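# Verify that every remaining column matches the first in type and bucket
# count; weighted wrappers are unwrapped before the comparison.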
for c in sorted_columns[1:]:
if isinstance(c, WeightedCategoricalColumn):
c = c.categorical_column
if not isinstance(c, type(c0)):
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same type, or be weighted_categorical_column of the same type. '
'Given column: {} of type: {} does not match given column: {} of '
'type: {}'.format(c0, type(c0), c, type(c)))
if num_buckets != c.num_buckets:
raise ValueError(
'To use shared_embedding_column, all categorical_columns must have '
'the same number of buckets. Given column: {} with buckets: {} does '
'not match column: {} with buckets: {}'.format(
c0, num_buckets, c, c.num_buckets))
if not shared_embedding_collection_name:
shared_embedding_collection_name = '_'.join(c.name for c in sorted_columns)
shared_embedding_collection_name += '_shared_embedding'
column_creator = SharedEmbeddingColumnCreator(
dimension, initializer, ckpt_to_load_from, tensor_name_in_ckpt,
num_buckets, trainable, shared_embedding_collection_name)
result = []
for column in categorical_columns:
result.append(
column_creator(
categorical_column=column, combiner=combiner, max_norm=max_norm))
return result
@tf_export('feature_column.numeric_column')
def numeric_column(key,
shape=(1,),
default_value=None,
dtype=dtypes.float32,
normalizer_fn=None):
"""Represents real valued or numerical features.
Example:
```python
price = numeric_column('price')
columns = [price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
# or
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
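A `normalizer_fn` can be used to transform parsed values before they reach
the model; for instance (an illustrative sketch with arbitrary constants):
```python
def scale_price(x):
  return (x - 3.0) / 4.2  # arbitrary shift/scale, for illustration only

price = numeric_column('price', normalizer_fn=scale_price)
```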
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
shape: An iterable of integers specifying the shape of the `Tensor`. A
single integer may be given, meaning a single-dimension `Tensor` with the
given width. The `Tensor` representing the column will have the shape of
[batch_size] + `shape`.
default_value: A single value compatible with `dtype` or an iterable of
values compatible with `dtype` which the column takes on during
`tf.Example` parsing if data is missing. A default value of `None` will
cause `tf.io.parse_example` to fail if an example does not contain this
column. If a single value is provided, the same value will be applied as
the default value for every item. If an iterable of values is provided,
the shape of the `default_value` should be equal to the given `shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
the output `Tensor` (e.g. `lambda x: (x - 3.0) / 4.2`). Please note that
even though the most common use case of this function is normalization, it
can be used for any kind of TensorFlow transformation.
Returns:
A `NumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int
ValueError: if any dimension in shape is not a positive integer
TypeError: if `default_value` is an iterable but not compatible with `shape`
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = _check_shape(shape, key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
default_value = fc_utils.check_default_value(
shape, default_value, dtype, key)
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
fc_utils.assert_key_is_string(key)
return NumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
@tf_export('feature_column.bucketized_column')
def bucketized_column(source_column, boundaries):
"""Represents discretized dense input.
Buckets include the left boundary, and exclude the right boundary. Namely,
`boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
`[1., 2.)`, and `[2., +inf)`.
For example, if the inputs are
```python
boundaries = [0, 10, 100]
input_tensor = [[-5, 10000],
[150, 10],
[5, 100]]
```
then the output will be
```python
output = [[0, 3],
[3, 2],
[1, 3]]
```
Example:
```python
price = numeric_column('price')
bucketized_price = bucketized_column(price, boundaries=[...])
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
columns = [bucketized_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
`bucketized_column` can also be crossed with another categorical column using
`crossed_column`:
```python
price = numeric_column('price')
# bucketized_column converts numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
# 'keywords' is a string feature.
price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50000)
columns = [price_x_keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Args:
source_column: A one-dimensional dense column which is generated with
`numeric_column`.
boundaries: A sorted list or tuple of floats specifying the boundaries.
Returns:
A `BucketizedColumn`.
Raises:
ValueError: If `source_column` is not a numeric column, or if it is not
one-dimensional.
ValueError: If `boundaries` is not a sorted list or tuple.
"""
if not isinstance(source_column, (NumericColumn, fc_old._NumericColumn)): # pylint: disable=protected-access
raise ValueError(
'source_column must be a column generated with numeric_column(). '
'Given: {}'.format(source_column))
if len(source_column.shape) > 1:
raise ValueError(
'source_column must be one-dimensional column. '
'Given: {}'.format(source_column))
if not boundaries:
raise ValueError('boundaries must not be empty.')
if not (isinstance(boundaries, list) or isinstance(boundaries, tuple)):
raise ValueError('boundaries must be a sorted list.')
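# Reject boundaries that are not strictly increasing.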
for i in range(len(boundaries) - 1):
if boundaries[i] >= boundaries[i + 1]:
raise ValueError('boundaries must be a sorted list.')
return BucketizedColumn(source_column, tuple(boundaries))
@tf_export('feature_column.categorical_column_with_hash_bucket')
def categorical_column_with_hash_bucket(key,
hash_bucket_size,
dtype=dtypes.string):
"""Represents sparse feature where ids are set by hashing.
Use this when your sparse features are in string or integer format, and you
want to distribute your inputs into a finite number of buckets by hashing.
`output_id = Hash(input_feature_string) % hash_bucket_size` for string type
input. For int type input, the value is converted to its string
representation first and then hashed by the same formula.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example:
```python
keywords = categorical_column_with_hash_bucket("keywords", 10000)
columns = [keywords, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
# or
keywords_embedded = embedding_column(keywords, 16)
columns = [keywords_embedded, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `HashedCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not set or is less than 1.
ValueError: `dtype` is neither string nor integer.
"""
if hash_bucket_size is None:
raise ValueError('hash_bucket_size must be set. key: {}'.format(key))
if hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}, key: {}'.format(
hash_bucket_size, key))
fc_utils.assert_key_is_string(key)
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
return HashedCategoricalColumn(key, hash_bucket_size, dtype)
@tf_export(v1=['feature_column.categorical_column_with_vocabulary_file'])
def categorical_column_with_vocabulary_file(key,
vocabulary_file,
vocabulary_size=None,
num_oov_buckets=0,
default_value=None,
dtype=dtypes.string):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to their line numbers. All other values are hashed and assigned
an ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal 'XX'
in input, and other values missing from the file, will be assigned ID 0. All
others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
vocabulary_size: Number of elements in the vocabulary. This must be no
greater than the length of `vocabulary_file`; if it is less, later values
are ignored. If None, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return categorical_column_with_vocabulary_file_v2(
key, vocabulary_file, vocabulary_size,
dtype, default_value,
num_oov_buckets)
@tf_export('feature_column.categorical_column_with_vocabulary_file', v1=[])
def categorical_column_with_vocabulary_file_v2(key,
vocabulary_file,
vocabulary_size=None,
dtype=dtypes.string,
default_value=None,
num_oov_buckets=0):
"""A `CategoricalColumn` with a vocabulary file.
Use this when your inputs are in string or integer format, and you have a
vocabulary file that maps each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
File `'/us/states.txt'` contains 50 lines, each with a 2-character U.S. state
abbreviation. All inputs with values in that file are assigned an ID 0-49,
corresponding to its line number. All other values are hashed and assigned an
ID 50-54.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
Example with `default_value`:
File `'/us/states.txt'` contains 51 lines - the first line is `'XX'`, and the
other 50 each have a 2-character U.S. state abbreviation. Both a literal
`'XX'` in input, and other values missing from the file, will be assigned
ID 0. All others are assigned the corresponding line number 1-50.
```python
states = categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
default_value=0)
columns = [states, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(states, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
vocabulary_file: The vocabulary file name.
vocabulary_size: Number of elements in the vocabulary. This must be no
greater than the length of `vocabulary_file`; if it is less, the later
values are ignored. If `None`, it is set to the length of `vocabulary_file`.
dtype: The type of features. Only string and integer types are supported.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This cannot be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` cannot be specified with
`default_value`.
Returns:
A `CategoricalColumn` with a vocabulary file.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
if not vocabulary_file:
raise ValueError('Missing vocabulary_file in {}.'.format(key))
if vocabulary_size is None:
if not gfile.Exists(vocabulary_file):
raise ValueError('vocabulary_file in {} does not exist.'.format(key))
with gfile.GFile(vocabulary_file) as f:
vocabulary_size = sum(1 for _ in f)
logging.info(
'vocabulary_size = %d in %s is inferred from the number of elements '
'in the vocabulary_file %s.', vocabulary_size, key, vocabulary_file)
# `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
if vocabulary_size < 1:
raise ValueError('Invalid vocabulary_size in {}.'.format(key))
if num_oov_buckets:
if default_value is not None:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyFileCategoricalColumn(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
default_value=-1 if default_value is None else default_value,
dtype=dtype)
@tf_export('feature_column.categorical_column_with_vocabulary_list')
def categorical_column_with_vocabulary_list(key,
vocabulary_list,
dtype=None,
default_value=-1,
num_oov_buckets=0):
"""A `CategoricalColumn` with in-memory vocabulary.
Use this when your inputs are in string or integer format, and you have an
in-memory vocabulary mapping each value to an integer ID. By default,
out-of-vocabulary values are ignored. Use either (but not both) of
`num_oov_buckets` and `default_value` to specify how to include
out-of-vocabulary values.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
Example with `num_oov_buckets`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-3 corresponding to its index (e.g., input 'B' produces output 2). All other
inputs are hashed and assigned an ID 4-5.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Example with `default_value`:
In the following example, each input in `vocabulary_list` is assigned an ID
0-4 corresponding to its index (e.g., input 'B' produces output 3). All other
inputs are assigned `default_value` 0.
```python
colors = categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'), default_value=0)
columns = [colors, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
And to make an embedding with either:
```python
columns = [embedding_column(colors, 3),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the column
name and the dictionary key for feature parsing configs, feature `Tensor`
objects, and feature columns.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported. If
`None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This cannot be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` cannot be specified
with `default_value`.
Returns:
A `CategoricalColumn` with in-memory vocabulary.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
if (vocabulary_list is None) or (len(vocabulary_list) < 1):
raise ValueError(
'vocabulary_list {} must be non-empty, column_name: {}'.format(
vocabulary_list, key))
if len(set(vocabulary_list)) != len(vocabulary_list):
raise ValueError(
'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
vocabulary_list, key))
vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
if num_oov_buckets:
if default_value != -1:
raise ValueError(
'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
key))
if num_oov_buckets < 0:
raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
num_oov_buckets, key))
fc_utils.assert_string_or_int(
vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
if dtype is None:
dtype = vocabulary_dtype
elif dtype.is_integer != vocabulary_dtype.is_integer:
raise ValueError(
'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
dtype, vocabulary_dtype, key))
fc_utils.assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
fc_utils.assert_key_is_string(key)
return VocabularyListCategoricalColumn(
key=key,
vocabulary_list=tuple(vocabulary_list),
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets)
@tf_export('feature_column.categorical_column_with_identity')
def categorical_column_with_identity(key, num_buckets, default_value=None):
"""A `CategoricalColumn` that returns identity values.
Use this when your inputs are integers in the range `[0, num_buckets)`, and
you want to use the input value itself as the categorical ID. Values outside
this range will result in `default_value` if specified, otherwise it will
fail.
Typically, this is used for contiguous ranges of integer indexes, but
it doesn't have to be. This might be inefficient, however, if many IDs
are unused. Consider `categorical_column_with_hash_bucket` in that case.
For input dictionary `features`, `features[key]` is either `Tensor` or
`SparseTensor`. If `Tensor`, missing values can be represented by `-1` for int
and `''` for string, which will be dropped by this feature column.
In the following examples, each input in the range `[0, 1000000)` is assigned
its own value as the categorical ID. All other inputs are assigned
`default_value` 0. Note that a literal 0 in inputs will result in the same
default ID.
Linear model:
```python
video_id = categorical_column_with_identity(
key='video_id', num_buckets=1000000, default_value=0)
columns = [video_id, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
Embedding for a DNN model:
```python
columns = [embedding_column(video_id, 9),...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
```
Args:
key: A unique string identifying the input feature. It is used as the
column name and the dictionary key for feature parsing configs, feature
`Tensor` objects, and feature columns.
num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and out-of-range inputs will be replaced with it.
Returns:
A `CategoricalColumn` that returns identity values.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
if num_buckets < 1:
raise ValueError(
'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
if (default_value is not None) and (
(default_value < 0) or (default_value >= num_buckets)):
raise ValueError(
'default_value {} not in range [0, {}), column_name {}'.format(
default_value, num_buckets, key))
fc_utils.assert_key_is_string(key)
return IdentityCategoricalColumn(
key=key, number_buckets=num_buckets, default_value=default_value)
@tf_export('feature_column.indicator_column')
def indicator_column(categorical_column):
"""Represents multi-hot representation of given categorical column.
- For DNN model, `indicator_column` can be used to wrap any
`categorical_column_*` (e.g., to feed to DNN). Consider to Use
`embedding_column` if the number of buckets/unique(values) are large.
- For Wide (aka linear) model, `indicator_column` is the internal
representation for categorical column when passing categorical column
directly (as any element in feature_columns) to `linear_model`. See
`linear_model` for details.
```python
name = indicator_column(categorical_column_with_vocabulary_list(
'name', ['bob', 'george', 'wanda']))
columns = [name, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
dense_tensor = input_layer(features, columns)
dense_tensor == [[1, 0, 0]] # If "name" bytes_list is ["bob"]
dense_tensor == [[1, 0, 1]] # If "name" bytes_list is ["bob", "wanda"]
dense_tensor == [[2, 0, 0]] # If "name" bytes_list is ["bob", "bob"]
```
Args:
categorical_column: A `CategoricalColumn` which is created by
`categorical_column_with_*` or `crossed_column` functions.
Returns:
An `IndicatorColumn`.
"""
return IndicatorColumn(categorical_column)
@tf_export('feature_column.weighted_categorical_column')
def weighted_categorical_column(categorical_column,
weight_feature_key,
dtype=dtypes.float32):
"""Applies weight values to a `CategoricalColumn`.
Use this when each of your sparse inputs has both an ID and a value. For
example, if you're representing text documents as a collection of word
frequencies, you can provide 2 parallel sparse input features ('terms' and
'frequencies' below).
Example:
Input `tf.Example` objects:
```proto
[
features {
feature {
key: "terms"
value {bytes_list {value: "very" value: "model"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.3 value: 0.1}}
}
},
features {
feature {
key: "terms"
value {bytes_list {value: "when" value: "course" value: "human"}}
}
feature {
key: "frequencies"
value {float_list {value: 0.4 value: 0.1 value: 0.2}}
}
}
]
```
```python
categorical_column = categorical_column_with_hash_bucket(
column_name='terms', hash_bucket_size=1000)
weighted_column = weighted_categorical_column(
categorical_column=categorical_column, weight_feature_key='frequencies')
columns = [weighted_column, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction, _, _ = linear_model(features, columns)
```
This assumes the input dictionary contains a `SparseTensor` for key
'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must have
the same indices and dense shape.
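Equivalently (a minimal sketch, bypassing `tf.io.parse_example`), the two
parallel inputs above can be fed directly as `SparseTensor`s with matching
indices and dense shapes:
```python
terms = tf.SparseTensor(indices=[[0, 0], [0, 1]],
                        values=['very', 'model'], dense_shape=[1, 2])
frequencies = tf.SparseTensor(indices=[[0, 0], [0, 1]],
                              values=[0.3, 0.1], dense_shape=[1, 2])
features = {'terms': terms, 'frequencies': frequencies}
```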
Args:
categorical_column: A `CategoricalColumn` created by
`categorical_column_with_*` functions.
weight_feature_key: String key for weight values.
dtype: Type of weights, such as `tf.float32`. Only float and integer weights
are supported.
Returns:
A `CategoricalColumn` composed of two sparse features: one represents id,
the other represents weight (value) of the id feature in that example.
Raises:
ValueError: if `dtype` is not convertible to float.
"""
if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype {} is not convertible to float.'.format(dtype))
return WeightedCategoricalColumn(
categorical_column=categorical_column,
weight_feature_key=weight_feature_key,
dtype=dtype)
@tf_export('feature_column.crossed_column')
def crossed_column(keys, hash_bucket_size, hash_key=None):
"""Returns a column for performing crosses of categorical features.
Crossed features will be hashed according to `hash_bucket_size`. Conceptually,
the transformation can be thought of as:
Hash(cartesian product of features) % `hash_bucket_size`
For example, if the input features are:
* SparseTensor referred by first key:
```python
shape = [2, 2]
{
[0, 0]: "a"
[1, 0]: "b"
[1, 1]: "c"
}
```
* SparseTensor referred by second key:
```python
shape = [2, 1]
{
[0, 0]: "d"
[1, 0]: "e"
}
```
then the crossed feature will look like:
```python
shape = [2, 2]
{
[0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
[1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
[1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size
}
```
Here is an example to create a linear model with crosses of string features:
```python
keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
You could also use vocabulary lookup before crossing:
```python
keywords = categorical_column_with_vocabulary_file(
'keywords', '/path/to/vocabulary/file', vocabulary_size=1000)
keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50000)
columns = [keywords_x_doc_terms, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
If an input feature is of numeric type, you can use
`categorical_column_with_identity`, or `bucketized_column`, as in the example:
```python
# vertical_id is an integer categorical feature.
vertical_id = categorical_column_with_identity('vertical_id', 10000)
price = numeric_column('price')
# bucketized_column converts a numerical feature to a categorical one.
bucketized_price = bucketized_column(price, boundaries=[...])
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
columns = [vertical_id_x_price, ...]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
linear_prediction = linear_model(features, columns)
```
To use a crossed column in a DNN model, wrap it in an embedding column, as in
this example:
```python
vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50000)
vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
```
Args:
keys: An iterable identifying the features to be crossed. Each element can
be either:
* string: Will use the corresponding feature which must be of string type.
* `CategoricalColumn`: Will use the transformed tensor produced by this
column. Does not support hashed categorical column.
hash_bucket_size: An int >= 1. The number of hash buckets.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseCrossOp (optional).
Returns:
A `CrossedColumn`.
Raises:
ValueError: If `len(keys) < 2`.
ValueError: If any of the keys is neither a string nor `CategoricalColumn`.
ValueError: If any of the keys is `HashedCategoricalColumn`.
ValueError: If `hash_bucket_size < 1`.
"""
if not hash_bucket_size or hash_bucket_size < 1:
raise ValueError('hash_bucket_size must be at least 1. '
'hash_bucket_size: {}'.format(hash_bucket_size))
if not keys or len(keys) < 2:
raise ValueError(
'keys must be a list with length > 1. Given: {}'.format(keys))
for key in keys:
if (not isinstance(key, six.string_types) and
not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))): # pylint: disable=protected-access
raise ValueError(
'Unsupported key type. All keys must be either string, or '
'categorical column except HashedCategoricalColumn. '
'Given: {}'.format(key))
if isinstance(key,
(HashedCategoricalColumn, fc_old._HashedCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'categorical_column_with_hash_bucket is not supported for crossing. '
'Hashing before crossing will increase probability of collision. '
'Instead, use the feature name as a string. Given: {}'.format(key))
return CrossedColumn(
keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)
@six.add_metaclass(abc.ABCMeta)
class FeatureColumn(object):
"""Represents a feature column abstraction.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
To distinguish between the concept of a feature family and a specific binary
feature within a family, we refer to a feature family like "country" as a
feature column. For example, we can have a feature in a `tf.Example` format:
{key: "country", value: [ "US" ]}
In this example the value of the feature is "US" and "country" refers to the
column of the feature.
This class is an abstract class. Users should not create instances of this.
"""
@abc.abstractproperty
def name(self):
"""Returns string. Used for naming."""
pass
@abc.abstractmethod
def transform_feature(self, transformation_cache, state_manager):
"""Returns intermediate representation (usually a `Tensor`).
Uses `transformation_cache` to create an intermediate representation
(usually a `Tensor`) that other feature columns can use.
Example usage of `transformation_cache`:
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). To access corresponding `Tensor`s,
transformation_cache will be used as follows:
```python
raw_tensor = transformation_cache.get('raw', state_manager)
fc_tensor = transformation_cache.get(input_fc, state_manager)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
"""
pass
@abc.abstractproperty
def parse_example_spec(self):
"""Returns a `tf.Example` parsing spec as dict.
It is used to build the parsing spec for `tf.io.parse_example`. The returned
spec is a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`,
and other supported objects. Please check the documentation of
`tf.io.parse_example` for all supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
`FeatureColumn` (input_fc). One possible implementation of
parse_example_spec is as follows:
```python
spec = {'raw': tf.io.FixedLenFeature(...)}
spec.update(input_fc.parse_example_spec)
return spec
```
"""
pass
def create_state(self, state_manager):
"""Uses the `state_manager` to create state for the FeatureColumn.
Args:
state_manager: A `StateManager` to create / access resources such as
lookup tables and variables.
"""
pass
@abc.abstractproperty
def _is_v2_column(self):
"""Returns whether this FeatureColumn is fully conformant to the new API.
This is needed for composition cases, where e.g. an EmbeddingColumn might
take an old categorical column as input, in which case the old API should
be used.
"""
pass
@abc.abstractproperty
def parents(self):
"""Returns a list of immediate raw feature and FeatureColumn dependencies.
For example:
# For the following feature columns
a = numeric_column('f1')
b = bucketized_column(a, boundaries=[0.])
c = crossed_column([b, 'f2'], hash_bucket_size=100)
# The expected parents are:
a.parents = ['f1']
b.parents = [a]
c.parents = [b, 'f2']
"""
pass
@abc.abstractmethod
def _get_config(self):
"""Returns the config of the feature column.
A FeatureColumn config is a Python dictionary (serializable) containing the
configuration of a FeatureColumn. The same FeatureColumn can be
reinstantiated later from this configuration.
The config of a feature column does not include information about feature
columns depending on it nor the FeatureColumn class name.
Example with (de)serialization practices followed in this file:
```python
class SerializationExampleFeatureColumn(
FeatureColumn, collections.namedtuple(
'SerializationExampleFeatureColumn',
('dimension', 'parent', 'dtype', 'normalizer_fn'))):
def _get_config(self):
# Create a dict from the namedtuple.
# Python attribute literals can be directly copied from / to the config.
# For example 'dimension', assuming it is an integer literal.
config = dict(zip(self._fields, self))
# (De)serialization of parent FeatureColumns should use the provided
# (de)serialize_feature_column() methods that take care of de-duping.
config['parent'] = serialize_feature_column(self.parent)
# Many objects provide custom (de)serialization e.g: for tf.DType
# tf.DType.name, tf.as_dtype() can be used.
config['dtype'] = self.dtype.name
# Non-trivial dependencies should be Keras-(de)serializable.
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
# This should do the inverse transform from `_get_config` and construct
# the namedtuple.
kwargs = config.copy()
kwargs['parent'] = deserialize_feature_column(
config['parent'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
return cls(**kwargs)
```
Returns:
A serializable Dict that can be used to deserialize the object with
from_config.
"""
pass
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""Creates a FeatureColumn from its config.
This method should be the reverse of `_get_config`, capable of instantiating
the same FeatureColumn from the config dictionary. See `_get_config` for an
example of common (de)serialization practices followed in this file.
TODO(b/118939620): This is a private method until consensus is reached on
supporting object deserialization deduping within Keras.
Args:
config: A Dict config acquired with `_get_config`.
custom_objects: Optional dictionary mapping names (strings) to custom
classes or functions to be considered during deserialization.
columns_by_name: A Dict[String, FeatureColumn] of existing columns in
order to avoid duplication. Should be passed to any calls to
deserialize_feature_column().
Returns:
A FeatureColumn for the input config.
"""
pass
class DenseColumn(FeatureColumn):
"""Represents a column which can be represented as `Tensor`.
Some examples of this type are: numeric_column, embedding_column,
indicator_column.
"""
@abc.abstractproperty
def variable_shape(self):
"""`TensorShape` of `get_dense_tensor`, without batch dimension."""
pass
@abc.abstractmethod
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `Tensor`.
The output of this function will be used by model-builder functions. For
example, the pseudocode of `input_layer` looks like:
```python
def input_layer(features, feature_columns, ...):
outputs = [fc.get_dense_tensor(...) for fc in feature_columns]
return tf.concat(outputs, axis=-1)
```
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
`Tensor` of shape [batch_size] + `variable_shape`.
"""
pass
def is_feature_column_v2(feature_columns):
"""Returns True if all feature columns are V2."""
for feature_column in feature_columns:
if not isinstance(feature_column, FeatureColumn):
return False
if not feature_column._is_v2_column: # pylint: disable=protected-access
return False
return True
def _create_weighted_sum(column, transformation_cache, state_manager,
sparse_combiner, weight_var):
"""Creates a weighted sum for a dense/categorical column for linear_model."""
if isinstance(column, CategoricalColumn):
return _create_categorical_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
sparse_combiner=sparse_combiner,
weight_var=weight_var)
else:
return _create_dense_column_weighted_sum(
column=column,
transformation_cache=transformation_cache,
state_manager=state_manager,
weight_var=weight_var)
def _create_dense_column_weighted_sum(column, transformation_cache,
state_manager, weight_var):
"""Create a weighted sum of a dense column for linear_model."""
tensor = column.get_dense_tensor(transformation_cache, state_manager)
num_elements = column.variable_shape.num_elements()
batch_size = array_ops.shape(tensor)[0]
tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
return math_ops.matmul(tensor, weight_var, name='weighted_sum')
class CategoricalColumn(FeatureColumn):
"""Represents a categorical feature.
A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name
'IdWeightPair', ('id_tensor', 'weight_tensor'))
@abc.abstractproperty
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
pass
@abc.abstractmethod
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
shape and indices as `id_tensor`. The expected `SparseTensor` is the same
as the parsing output of a `VarLenFeature`, which is a ragged matrix.
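For example (a sketch), a batch of two examples with ids `[3]` and `[1, 2]`
and no explicit weights would be represented as:
```python
id_tensor = tf.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
                            values=[3, 1, 2], dense_shape=[2, 2])
weight_tensor = None  # All weights are taken to be 1.
```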
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
def _create_categorical_column_weighted_sum(
column, transformation_cache, state_manager, sparse_combiner, weight_var):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Create a weighted sum of a categorical column for linear_model.
Note to maintainer: As an implementation detail, the weighted sum is
implemented via embedding_lookup_sparse for efficiency. Mathematically,
the two are the same.
Conceptually, a categorical column can be treated as a multi-hot
vector. Say:
```python
x = [0 0 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `c` in this case, which is the same as `w[2]`.
Another example is
```python
x = [0 1 1] # categorical column input
w = [a b c] # weights
```
The weighted sum is `b + c` in this case, which is the same as `w[1] + w[2]`.
For both cases, we can implement weighted sum via embedding_lookup with
sparse_combiner = "sum".
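For instance, a minimal sketch with the public API (values are illustrative):
```python
w = tf.constant([[1.], [2.], [3.]])  # Weights a, b, c as a 3x1 matrix.
ids = tf.SparseTensor(indices=[[0, 0], [0, 1]],
                      values=tf.constant([1, 2], dtype=tf.int64),
                      dense_shape=[1, 2])
# combiner='sum' returns w[1] + w[2] = [[5.]], the dense weighted sum.
tf.nn.embedding_lookup_sparse(w, ids, sp_weights=None, combiner='sum')
```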
"""
sparse_tensors = column.get_sparse_tensors(transformation_cache,
state_manager)
id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
array_ops.shape(sparse_tensors.id_tensor)[0], -1
])
weight_tensor = sparse_tensors.weight_tensor
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(
weight_tensor, [array_ops.shape(weight_tensor)[0], -1])
return embedding_ops.safe_embedding_lookup_sparse(
weight_var,
id_tensor,
sparse_weights=weight_tensor,
combiner=sparse_combiner,
name='weighted_sum')
class SequenceDenseColumn(FeatureColumn):
"""Represents dense sequence data."""
TensorSequenceLengthPair = collections.namedtuple( # pylint: disable=invalid-name
'TensorSequenceLengthPair', ('dense_tensor', 'sequence_length'))
@abc.abstractmethod
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
pass
class FeatureTransformationCache(object):
"""Handles caching of transformations while building the model.
`FeatureColumn` specifies how to digest an input column to the network. Some
feature columns require data transformations. This class caches those
transformations.
Some features may be used in more than one place. For example, one can use a
bucketized feature both by itself and in a cross with it. In that case we
should create only one bucketization op instead of creating ops for each
feature column separately. To handle re-use of transformed columns,
`FeatureTransformationCache` caches all previously transformed columns.
Example:
We're trying to use the following `FeatureColumn`s:
```python
bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
keywords = fc.categorical_column_with_hash_bucket("keywords", ...)
age_X_keywords = fc.crossed_column([bucketized_age, "keywords"], ...)
... = linear_model(features,
[bucketized_age, keywords, age_X_keywords])
If we transform each column independently, then we'll get duplication of
bucketization (one for cross, one for bucketization itself).
The `FeatureTransformationCache` eliminates this duplication.
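A minimal usage sketch of the cache itself (the feature values are
illustrative):
```python
cache = FeatureTransformationCache({'age': tf.constant([[25.], [31.]])})
age_first = cache.get('age', state_manager=None)  # Converted and cached.
age_again = cache.get('age', state_manager=None)  # Served from the cache.
assert age_first is age_again
```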
"""
def __init__(self, features):
"""Creates a `FeatureTransformationCache`.
Args:
features: A mapping from feature column to objects that are `Tensor` or
`SparseTensor`, or can be converted to one via
`sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
signifies a base feature (not-transformed). A `FeatureColumn` key
means that this `Tensor` is the output of an existing `FeatureColumn`
which can be reused.
"""
self._features = features.copy()
self._feature_tensors = {}
def get(self, key, state_manager):
"""Returns a `Tensor` for the given key.
A `str` key is used to access a base feature (not-transformed). When a
`FeatureColumn` is passed, the transformed feature is returned if it
already exists, otherwise the given `FeatureColumn` is asked to provide its
transformed output, which is then cached.
Args:
key: a `str` or a `FeatureColumn`.
state_manager: A StateManager object that holds the FeatureColumn state.
Returns:
The transformed `Tensor` corresponding to the `key`.
Raises:
ValueError: if key is not found or a transformed `Tensor` cannot be
computed.
"""
if key in self._feature_tensors:
# FeatureColumn is already transformed or converted.
return self._feature_tensors[key]
if key in self._features:
feature_tensor = self._get_raw_feature_as_tensor(key)
self._feature_tensors[key] = feature_tensor
return feature_tensor
if isinstance(key, six.string_types):
raise ValueError('Feature {} is not in features dictionary.'.format(key))
if not isinstance(key, FeatureColumn):
raise TypeError('"key" must be either a "str" or "FeatureColumn". '
'Provided: {}'.format(key))
column = key
logging.debug('Transforming feature_column %s.', column)
transformed = column.transform_feature(self, state_manager)
if transformed is None:
raise ValueError('Column {} is not supported.'.format(column.name))
self._feature_tensors[column] = transformed
return transformed
def _get_raw_feature_as_tensor(self, key):
"""Gets the raw_feature (keyed by `key`) as `tensor`.
The raw feature is converted to a (sparse) tensor, and its rank may be
expanded. For both `Tensor` and `SparseTensor`, the rank will be expanded to
2 if the rank is 1. Dynamic rank is also supported. A rank-0 raw feature
will raise an error, as it is not supported.
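For example (a sketch):
```python
# tf.constant(['a', 'b'])      -> expanded to shape [2, 1]
# tf.constant([['a'], ['b']])  -> returned unchanged (already rank 2)
```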
Args:
key: A `str` key to access the raw feature.
Returns:
A `Tensor` or `SparseTensor`.
Raises:
ValueError: if the raw feature has rank 0.
"""
raw_feature = self._features[key]
feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
raw_feature)
def expand_dims(input_tensor):
# Input_tensor must have rank 1.
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return sparse_ops.sparse_reshape(
input_tensor, [array_ops.shape(input_tensor)[0], 1])
else:
return array_ops.expand_dims(input_tensor, -1)
rank = feature_tensor.get_shape().ndims
if rank is not None:
if rank == 0:
raise ValueError(
'Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))
return feature_tensor if rank != 1 else expand_dims(feature_tensor)
# Handle dynamic rank.
with ops.control_dependencies([
check_ops.assert_positive(
array_ops.rank(feature_tensor),
message='Feature (key: {}) cannot have rank 0. Given: {}'.format(
key, feature_tensor))]):
return control_flow_ops.cond(
math_ops.equal(1, array_ops.rank(feature_tensor)),
lambda: expand_dims(feature_tensor),
lambda: feature_tensor)
# TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py
def _to_sparse_input_and_drop_ignore_values(input_tensor, ignore_value=None):
"""Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells.
If `input_tensor` is already a `SparseTensor`, just return it.
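For example (a sketch, assuming eager execution):
```python
dense = tf.constant([['a', ''], ['b', 'c']])
sp = _to_sparse_input_and_drop_ignore_values(dense)
# sp.values == [b'a', b'b', b'c']; the '' cell at [0, 1] is dropped.
```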
Args:
input_tensor: A string or integer `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
absent from the resulting `SparseTensor`. If `None`, default value of
`dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`).
Returns:
A `SparseTensor` with the same shape as `input_tensor`.
Raises:
ValueError: when `input_tensor`'s rank is `None`.
"""
input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
input_tensor)
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
return input_tensor
with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)):
if ignore_value is None:
if input_tensor.dtype == dtypes.string:
# Handled separately: TF strings are converted to numpy objects by
# default, so `as_numpy_dtype()` below would not yield `''`.
ignore_value = ''
elif input_tensor.dtype.is_integer:
ignore_value = -1 # -1 has a special meaning of missing feature
else:
# NOTE: `as_numpy_dtype` is a property, so with the parentheses this is
# constructing a new numpy object of the given type, which yields the
# default value for that type.
ignore_value = input_tensor.dtype.as_numpy_dtype()
ignore_value = math_ops.cast(
ignore_value, input_tensor.dtype, name='ignore_value')
indices = array_ops.where(
math_ops.not_equal(input_tensor, ignore_value), name='indices')
return sparse_tensor_lib.SparseTensor(
indices=indices,
values=array_ops.gather_nd(input_tensor, indices, name='values'),
dense_shape=array_ops.shape(
input_tensor, out_type=dtypes.int64, name='dense_shape'))
def _normalize_feature_columns(feature_columns):
"""Normalizes the `feature_columns` input.
This method converts `feature_columns` to a list as best it can. In
addition, it verifies the types and other properties of `feature_columns`
required by downstream libraries.
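For instance (a sketch; `numeric_column` here is only for illustration):
```python
a = numeric_column('a')
b = numeric_column('b')
_normalize_feature_columns(b)                      # -> [b]
_normalize_feature_columns(col for col in (b, a))  # -> [a, b], sorted by name
```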
Args:
feature_columns: The raw feature columns, usually passed by users.
Returns:
The normalized feature column list.
Raises:
ValueError: for any invalid inputs, such as empty, duplicated names, etc.
"""
if isinstance(feature_columns, FeatureColumn):
feature_columns = [feature_columns]
if isinstance(feature_columns, collections.Iterator):
feature_columns = list(feature_columns)
if isinstance(feature_columns, dict):
raise ValueError('Expected feature_columns to be iterable, found dict.')
for column in feature_columns:
if not isinstance(column, FeatureColumn):
raise ValueError('Items of feature_columns must be a FeatureColumn. '
'Given (type {}): {}.'.format(type(column), column))
if not feature_columns:
raise ValueError('feature_columns must not be empty.')
name_to_column = {}
for column in feature_columns:
if column.name in name_to_column:
raise ValueError('Duplicate feature column name found for columns: {} '
'and {}. This usually means that these columns refer to '
'same base feature. Either one must be discarded or a '
'duplicated but renamed item must be inserted in '
'features dict.'.format(column,
name_to_column[column.name]))
name_to_column[column.name] = column
return sorted(feature_columns, key=lambda x: x.name)
class NumericColumn(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'NumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""see `numeric_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of a numeric column must be a dense Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
return self._transform_input_tensor(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
Raises:
ValueError: If a SparseTensor is passed in.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
return self._transform_input_tensor(input_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(self.shape)
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing numeric feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
"""
# The feature has already been transformed. Return the intermediate
# representation created by transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['normalizer_fn'] = generic_utils.serialize_keras_object(
self.normalizer_fn)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['normalizer_fn'] = generic_utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
class BucketizedColumn(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('BucketizedColumn',
('source_column', 'boundaries'))):
"""See `bucketized_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.source_column, FeatureColumn) and
self.source_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_bucketized'.format(self.source_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.source_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def _get_dense_tensor_for_input_tensor(self, input_tensor):
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def _get_sparse_tensors_for_input_tensor(self, input_tensor):
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
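# i1 enumerates the batch index for every (example, source-dimension) pair;
# i2 tiles the source-dimension index across the batch.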
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]),
(-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and offset them so they are unique across
# dimensions, e.g. 2nd-dimension indices range from k to 2*k-1 with k buckets.
bucket_indices = (
array_ops.reshape(input_tensor, (-1,)) +
(len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices,
values=bucket_indices,
dense_shape=dense_shape)
return CategoricalColumn.IdWeightPair(sparse_tensor, None)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
class EmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'EmbeddingColumn',
('categorical_column', 'dimension', 'combiner', 'initializer',
'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'))):
"""See `embedding_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_embedding'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""Transforms underlying `categorical_column`."""
return transformation_cache.get(self.categorical_column, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return inputs.get(self.categorical_column)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.vector(self.dimension)
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def create_state(self, state_manager):
"""Creates the embedding lookup variable."""
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
state_manager.create_variable(
self,
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
trainable=self.trainable,
use_resource=True,
initializer=self.initializer)
def _get_dense_tensor_internal_helper(self, sparse_tensors,
embedding_weights):
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
if self.ckpt_to_load_from is not None:
to_restore = embedding_weights
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
self.tensor_name_in_ckpt: to_restore
})
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def _get_dense_tensor_internal(self, sparse_tensors, state_manager):
"""Private method that follows the signature of get_dense_tensor."""
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def _old_get_dense_tensor_internal(self, sparse_tensors, weight_collections,
trainable):
"""Private method that follows the signature of _get_dense_tensor."""
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return self._get_dense_tensor_internal_helper(sparse_tensors,
embedding_weights)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns tensor after doing the embedding lookup.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Embedding lookup tensor.
Raises:
ValueError: `categorical_column` is SequenceCategoricalColumn.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_dense_tensor_internal(sparse_tensors, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors( # pylint: disable=protected-access
inputs, weight_collections, trainable)
return self._old_get_dense_tensor_internal(sparse_tensors,
weight_collections, trainable)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
dense_tensor = self._get_dense_tensor_internal(sparse_tensors,
state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
dense_tensor = self._old_get_dense_tensor_internal(
sparse_tensors,
weight_collections=weight_collections,
trainable=trainable)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['initializer'] = initializers.serialize(self.initializer)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['initializer'] = initializers.deserialize(
config['initializer'], custom_objects=custom_objects)
return cls(**kwargs)
def _raise_shared_embedding_column_error():
raise ValueError('SharedEmbeddingColumns are not supported in '
'`linear_model` or `input_layer`. Please use '
'`DenseFeatures` or `LinearModel` instead.')
class SharedEmbeddingColumnCreator(tracking.AutoTrackable):
def __init__(self,
dimension,
initializer,
ckpt_to_load_from,
tensor_name_in_ckpt,
num_buckets,
trainable,
name='shared_embedding_column_creator'):
self._dimension = dimension
self._initializer = initializer
self._ckpt_to_load_from = ckpt_to_load_from
self._tensor_name_in_ckpt = tensor_name_in_ckpt
self._num_buckets = num_buckets
self._trainable = trainable
self._name = name
# Map from graph keys to embedding_weight variables.
self._embedding_weights = {}
def __call__(self, categorical_column, combiner, max_norm):
return SharedEmbeddingColumn(categorical_column, self, combiner, max_norm)
@property
def embedding_weights(self):
key = ops.get_default_graph()._graph_key # pylint: disable=protected-access
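# Cache one embedding variable per graph: within a graph this creator
# reuses the same matrix, while each new graph gets a fresh variable.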
if key not in self._embedding_weights:
embedding_shape = (self._num_buckets, self._dimension)
var = variable_scope.get_variable(
name=self._name,
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self._initializer,
trainable=self._trainable)
if self._ckpt_to_load_from is not None:
to_restore = var
if isinstance(to_restore, variables.PartitionedVariable):
to_restore = to_restore._get_variable_list() # pylint: disable=protected-access
checkpoint_utils.init_from_checkpoint(
self._ckpt_to_load_from, {self._tensor_name_in_ckpt: to_restore})
self._embedding_weights[key] = var
return self._embedding_weights[key]
@property
def dimension(self):
return self._dimension
class SharedEmbeddingColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'SharedEmbeddingColumn',
('categorical_column', 'shared_embedding_column_creator', 'combiner',
'max_norm'))):
"""See `embedding_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_shared_embedding'.format(self.categorical_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
def _parse_example_spec(self):
return _raise_shared_embedding_column_error()
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return transformation_cache.get(self.categorical_column, state_manager)
def _transform_feature(self, inputs):
return _raise_shared_embedding_column_error()
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.vector(self.shared_embedding_column_creator.dimension)
@property
def _variable_shape(self):
return _raise_shared_embedding_column_error()
def _get_dense_tensor_internal(self, transformation_cache, state_manager):
"""Private method that follows the signature of _get_dense_tensor."""
# This method is called from a variable_scope with name _var_scope_name,
# which is shared among all shared embeddings. Open a name_scope here, so
# that the ops for different columns have distinct names.
with ops.name_scope(None, default_name=self.name):
# Get sparse IDs and weights.
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sparse_ids = sparse_tensors.id_tensor
sparse_weights = sparse_tensors.weight_tensor
embedding_weights = self.shared_embedding_column_creator.embedding_weights
# Return embedding lookup result.
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights=embedding_weights,
sparse_ids=sparse_ids,
sparse_weights=sparse_weights,
combiner=self.combiner,
name='%s_weights' % self.name,
max_norm=self.max_norm)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns the embedding lookup result."""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
return self._get_dense_tensor_internal(transformation_cache, state_manager)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
return _raise_shared_embedding_column_error()
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In embedding_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
dense_tensor = self._get_dense_tensor_internal(transformation_cache,
state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
return _raise_shared_embedding_column_error()
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
def _check_shape(shape, key):
"""Returns shape if it's valid, raises error otherwise."""
assert shape is not None
if not nest.is_sequence(shape):
shape = [shape]
shape = tuple(shape)
for dimension in shape:
if not isinstance(dimension, int):
      raise TypeError('shape dimensions must be integers. '
'shape: {}, key: {}'.format(shape, key))
if dimension < 1:
raise ValueError('shape dimensions must be greater than 0. '
'shape: {}, key: {}'.format(shape, key))
return shape
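# A minimal sketch of how `_check_shape` normalizes its input (illustrative
# only; `_demo_check_shape` is a hypothetical helper, not part of the
# library). A bare int is wrapped into a 1-tuple and any valid iterable is
# returned as a tuple, so downstream namedtuple fields stay hashable.
def _demo_check_shape():
  assert _check_shape(3, 'price') == (3,)
  assert _check_shape([2, 3], 'image') == (2, 3)
  # Non-integer or non-positive dimensions raise TypeError / ValueError.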
class HashedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('HashedCategoricalColumn',
('key', 'hash_bucket_size', 'dtype'))):
"""see `categorical_column_with_hash_bucket`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Hashes the values in the feature_column."""
if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError('SparseColumn input must be a SparseTensor.')
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
if self.dtype == dtypes.string:
sparse_values = input_tensor.values
else:
sparse_values = string_ops.as_string(input_tensor.values)
sparse_id_values = string_ops.string_to_hash_bucket_fast(
sparse_values, self.hash_bucket_size, name='lookup')
return sparse_tensor_lib.SparseTensor(
input_tensor.indices, sparse_id_values, input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Hashes the values in the feature_column."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
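# A minimal usage sketch for hash-bucket columns (illustrative only;
# `_demo_hashed_column` is a hypothetical helper, not part of the library).
# String values are hashed into `hash_bucket_size` buckets, so `num_buckets`
# simply equals `hash_bucket_size`.
def _demo_hashed_column():
  column = categorical_column_with_hash_bucket('terms', hash_bucket_size=100)
  assert column.num_buckets == 100
  # parse_example_spec maps the key to a VarLenFeature of the column's dtype.
  assert 'terms' in column.parse_example_spec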
class VocabularyFileCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('VocabularyFileCategoricalColumn',
('key', 'vocabulary_file', 'vocabulary_size',
'num_oov_buckets', 'dtype', 'default_value'))):
"""See `categorical_column_with_vocabulary_file`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Creates a lookup table for the vocabulary."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_file` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
# TODO(rohanj): Use state manager to manage the index table creation.
return lookup_ops.index_table_from_file(
vocabulary_file=self.vocabulary_file,
num_oov_buckets=self.num_oov_buckets,
vocab_size=self.vocabulary_size,
default_value=self.default_value,
key_dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.vocabulary_size + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
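# A minimal sketch of the bucket arithmetic for vocabulary-file columns
# (illustrative only; `_demo_vocabulary_file_column` and the path below are
# hypothetical). Ids in [0, vocabulary_size) map to file entries; OOV values
# hash into the `num_oov_buckets` buckets appended after them.
def _demo_vocabulary_file_column():
  column = categorical_column_with_vocabulary_file(
      key='city',
      vocabulary_file='/tmp/cities.txt',  # hypothetical path
      vocabulary_size=50,
      num_oov_buckets=5)
  # num_buckets == vocabulary_size + num_oov_buckets (see the property above).
  assert column.num_buckets == 55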
class VocabularyListCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'VocabularyListCategoricalColumn',
('key', 'vocabulary_list', 'dtype', 'default_value', 'num_oov_buckets'))
):
"""See `categorical_column_with_vocabulary_list`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Creates a lookup table for the vocabulary list."""
if self.dtype.is_integer != input_tensor.dtype.is_integer:
raise ValueError(
'Column dtype and SparseTensors dtype must be compatible. '
'key: {}, column dtype: {}, tensor dtype: {}'.format(
self.key, self.dtype, input_tensor.dtype))
fc_utils.assert_string_or_int(
input_tensor.dtype,
prefix='column_name: {} input_tensor'.format(self.key))
key_dtype = self.dtype
if input_tensor.dtype.is_integer:
# `index_table_from_tensor` requires 64-bit integer keys.
key_dtype = dtypes.int64
input_tensor = math_ops.cast(input_tensor, dtypes.int64)
# TODO(rohanj): Use state manager to manage the index table creation.
return lookup_ops.index_table_from_tensor(
vocabulary_list=tuple(self.vocabulary_list),
default_value=self.default_value,
num_oov_buckets=self.num_oov_buckets,
dtype=key_dtype,
name='{}_lookup'.format(self.key)).lookup(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""Creates a lookup table for the vocabulary list."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return len(self.vocabulary_list) + self.num_oov_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
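# A minimal sketch of vocabulary-list id assignment (illustrative only;
# `_demo_vocabulary_list_column` is a hypothetical helper, not part of the
# library). In-vocabulary values map to their list index; with
# `num_oov_buckets=2`, out-of-vocabulary values hash into ids 3 and 4.
def _demo_vocabulary_list_column():
  column = categorical_column_with_vocabulary_list(
      key='color', vocabulary_list=('R', 'G', 'B'), num_oov_buckets=2)
  assert column.num_buckets == 5  # len(vocabulary_list) + num_oov_buckets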
class IdentityCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('IdentityCategoricalColumn',
('key', 'number_buckets', 'default_value'))):
"""See `categorical_column_with_identity`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
"""Returns a SparseTensor with identity values."""
if not input_tensor.dtype.is_integer:
raise ValueError(
'Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = math_ops.cast(input_tensor.values, dtypes.int64, name='values')
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
if self.default_value is None:
# Fail if values are out-of-range.
assert_less = check_ops.assert_less(
values, num_buckets, data=(values, num_buckets),
name='assert_less_than_num_buckets')
assert_greater = check_ops.assert_greater_equal(
values, zero, data=(values,),
name='assert_greater_or_equal_0')
with ops.control_dependencies((assert_less, assert_greater)):
values = array_ops.identity(values)
else:
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
def transform_feature(self, transformation_cache, state_manager):
"""Returns a SparseTensor with identity values."""
input_tensor = _to_sparse_input_and_drop_ignore_values(
transformation_cache.get(self.key, state_manager))
return self._transform_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
return self._transform_input_tensor(input_tensor)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.number_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
return dict(zip(self._fields, self))
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
return cls(**kwargs)
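# A minimal sketch of identity-column semantics (illustrative only;
# `_demo_identity_column` is a hypothetical helper, not part of the library).
# Integer inputs are used directly as ids; with a `default_value`,
# out-of-range inputs are replaced instead of failing the range assertions in
# `_transform_input_tensor`.
def _demo_identity_column():
  column = categorical_column_with_identity(
      key='video_id', num_buckets=1000, default_value=0)
  assert column.num_buckets == 1000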
class WeightedCategoricalColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple(
'WeightedCategoricalColumn',
('categorical_column', 'weight_feature_key', 'dtype'))):
"""See `weighted_categorical_column`."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_weighted_by_{}'.format(
self.categorical_column.name, self.weight_feature_key)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = self.categorical_column.parse_example_spec
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
config = self.categorical_column._parse_example_spec # pylint: disable=protected-access
if self.weight_feature_key in config:
raise ValueError('Parse config {} already exists for {}.'.format(
config[self.weight_feature_key], self.weight_feature_key))
config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
return config
@property
def num_buckets(self):
"""See `DenseColumn` base class."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _transform_weight_tensor(self, weight_tensor):
if weight_tensor is None:
raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
weight_tensor)
if self.dtype != weight_tensor.dtype.base_dtype:
raise ValueError('Bad dtype, expected {}, but got {}.'.format(
self.dtype, weight_tensor.dtype))
if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
# The weight tensor can be a regular Tensor. In this case, sparsify it.
weight_tensor = _to_sparse_input_and_drop_ignore_values(
weight_tensor, ignore_value=0.0)
if not weight_tensor.dtype.is_floating:
weight_tensor = math_ops.cast(weight_tensor, dtypes.float32)
return weight_tensor
def transform_feature(self, transformation_cache, state_manager):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = transformation_cache.get(self.weight_feature_key,
state_manager)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (transformation_cache.get(self.categorical_column, state_manager),
weight_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Applies weights to tensor generated from `categorical_column`'."""
weight_tensor = inputs.get(self.weight_feature_key)
weight_tensor = self._transform_weight_tensor(weight_tensor)
return (inputs.get(self.categorical_column), weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
tensors = transformation_cache.get(self, state_manager)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
del weight_collections
del trainable
tensors = inputs.get(self)
return CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column, self.weight_feature_key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
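# A minimal sketch of pairing ids with example-provided weights (illustrative
# only; `_demo_weighted_column` is a hypothetical helper, not part of the
# library). The weight feature key is added to parse_example_spec next to the
# wrapped column's own key.
def _demo_weighted_column():
  ids = categorical_column_with_identity(key='item', num_buckets=10)
  weighted = weighted_categorical_column(
      categorical_column=ids, weight_feature_key='item_frequency')
  spec = weighted.parse_example_spec
  assert 'item' in spec and 'item_frequency' in spec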
class CrossedColumn(
CategoricalColumn,
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('CrossedColumn',
('keys', 'hash_bucket_size', 'hash_key'))):
"""See `crossed_column`."""
@property
def _is_v2_column(self):
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
continue
if not isinstance(key, FeatureColumn):
return False
if not key._is_v2_column: # pylint: disable=protected-access
return False
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
feature_names = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, (FeatureColumn, fc_old._FeatureColumn)): # pylint: disable=protected-access
feature_names.append(key.name)
else: # key must be a string
feature_names.append(key)
return '_X_'.join(sorted(feature_names))
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
config = {}
for key in self.keys:
if isinstance(key, FeatureColumn):
config.update(key.parse_example_spec)
elif isinstance(key, fc_old._FeatureColumn): # pylint: disable=protected-access
config.update(key._parse_example_spec) # pylint: disable=protected-access
else: # key must be a string
config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
return config
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def transform_feature(self, transformation_cache, state_manager):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(transformation_cache.get(key, state_manager))
elif isinstance(key, (fc_old._CategoricalColumn, CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key.get_sparse_tensors(transformation_cache,
state_manager)
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Generates a hashed sparse cross from the input tensors."""
feature_tensors = []
for key in _collect_leaf_level_keys(self):
if isinstance(key, six.string_types):
feature_tensors.append(inputs.get(key))
elif isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn)): # pylint: disable=protected-access
ids_and_weights = key._get_sparse_tensors(inputs) # pylint: disable=protected-access
if ids_and_weights.weight_tensor is not None:
raise ValueError(
'crossed_column does not support weight_tensor, but the given '
'column populates weight_tensor. '
'Given column: {}'.format(key.name))
feature_tensors.append(ids_and_weights.id_tensor)
else:
raise ValueError('Unsupported column type. Given: {}'.format(key))
return sparse_ops.sparse_cross_hashed(
inputs=feature_tensors,
num_buckets=self.hash_bucket_size,
hash_key=self.hash_key)
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.hash_bucket_size
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def get_sparse_tensors(self, transformation_cache, state_manager):
"""See `CategoricalColumn` base class."""
return CategoricalColumn.IdWeightPair(
transformation_cache.get(self, state_manager), None)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
"""See `CategoricalColumn` base class."""
del weight_collections
del trainable
return CategoricalColumn.IdWeightPair(inputs.get(self), None)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return list(self.keys)
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['keys'] = tuple([serialize_feature_column(fc) for fc in self.keys])
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['keys'] = tuple([
deserialize_feature_column(c, custom_objects, columns_by_name)
for c in config['keys']
])
return cls(**kwargs)
def _collect_leaf_level_keys(cross):
"""Collects base keys by expanding all nested crosses.
Args:
cross: A `CrossedColumn`.
Returns:
A list of strings or `CategoricalColumn` instances.
"""
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
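# A minimal sketch of leaf-key expansion for nested crosses (illustrative
# only; `_demo_collect_leaf_level_keys` is a hypothetical helper, not part of
# the library). Crossing a crossed column flattens it back to its base keys.
def _demo_collect_leaf_level_keys():
  inner = crossed_column(['a', 'b'], hash_bucket_size=10)
  outer = crossed_column([inner, 'c'], hash_bucket_size=10)
  assert _collect_leaf_level_keys(outer) == ['a', 'b', 'c']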
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
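# A minimal runnable sketch of the pruning semantics above (illustrative only;
# `_demo_prune_invalid_ids` is a hypothetical helper, not part of the
# library). Entries with negative ids are dropped from both the id and the
# weight tensors.
def _demo_prune_invalid_ids():
  ids = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]], values=[3, -1, 5], dense_shape=[2, 2])
  weights = sparse_tensor_lib.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]], values=[1., 2., 3.],
      dense_shape=[2, 2])
  # The id -1 at position [0, 1] (and its weight 2.) is removed.
  return _prune_invalid_ids(ids, weights)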
class IndicatorColumn(
DenseColumn,
SequenceDenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._SequenceDenseColumn, # pylint: disable=protected-access
collections.namedtuple('IndicatorColumn', ('categorical_column'))):
"""Represents a one-hot column for use in deep networks.
Args:
    categorical_column: A `CategoricalColumn` that is created by a
      `categorical_column_with_*` function.
"""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_indicator'.format(self.categorical_column.name)
def _transform_id_weight_pair(self, id_weight_pair):
id_tensor = id_weight_pair.id_tensor
weight_tensor = id_weight_pair.weight_tensor
# If the underlying column is weighted, return the input as a dense tensor.
if weight_tensor is not None:
weighted_column = sparse_ops.sparse_merge(
sp_ids=id_tensor,
sp_values=weight_tensor,
vocab_size=int(self._variable_shape[-1]))
# Remove (?, -1) index.
weighted_column = sparse_ops.sparse_slice(weighted_column, [0, 0],
weighted_column.dense_shape)
      # Use scatter_nd to merge duplicated indices if they exist,
      # instead of sparse_tensor_to_dense.
return array_ops.scatter_nd(weighted_column.indices,
weighted_column.values,
weighted_column.dense_shape)
dense_id_tensor = sparse_ops.sparse_tensor_to_dense(
id_tensor, default_value=-1)
    # The one-hot tensor must be float32 so that it can be concatenated with
    # the other float32 inputs to input_layer.
one_hot_id_tensor = array_ops.one_hot(
dense_id_tensor,
depth=self._variable_shape[-1],
on_value=1.0,
off_value=0.0)
# Reduce to get a multi-hot per example.
return math_ops.reduce_sum(one_hot_id_tensor, axis=[-2])
def transform_feature(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Transformed feature `Tensor`.
Raises:
ValueError: if input rank is not known at graph building time.
"""
id_weight_pair = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._transform_id_weight_pair(id_weight_pair)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._transform_id_weight_pair(id_weight_pair)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of the dense `Tensor`."""
if isinstance(self.categorical_column, FeatureColumn):
return tensor_shape.TensorShape([1, self.categorical_column.num_buckets])
else:
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
Raises:
ValueError: If `categorical_column` is a `SequenceCategoricalColumn`.
"""
if isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    # The feature has already been transformed. Return the intermediate
    # representation created by transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
if isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must not be of type _SequenceCategoricalColumn. '
'Suggested fix A: If you wish to use DenseFeatures, use a '
'non-sequence categorical_column_with_*. '
'Suggested fix B: If you wish to create sequence input, use '
'SequenceFeatures instead of DenseFeatures. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    # The feature has already been transformed. Return the intermediate
    # representation created by transform_feature.
return inputs.get(self)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""See `SequenceDenseColumn` base class."""
if not isinstance(self.categorical_column, SequenceCategoricalColumn):
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    # The feature has already been transformed. Return the intermediate
    # representation created by transform_feature.
dense_tensor = transformation_cache.get(self, state_manager)
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sequence_dense_tensor(self,
inputs,
weight_collections=None,
trainable=None):
# Do nothing with weight_collections and trainable since no variables are
# created in this function.
del weight_collections
del trainable
if not isinstance(
self.categorical_column,
(SequenceCategoricalColumn, fc_old._SequenceCategoricalColumn)): # pylint: disable=protected-access
raise ValueError(
'In indicator_column: {}. '
'categorical_column must be of type _SequenceCategoricalColumn '
'to use SequenceFeatures. '
'Suggested fix: Use one of sequence_categorical_column_with_*. '
'Given (type {}): {}'.format(self.name, type(self.categorical_column),
self.categorical_column))
    # The feature has already been transformed. Return the intermediate
    # representation created by _transform_feature.
dense_tensor = inputs.get(self)
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors.id_tensor)
return SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=sequence_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
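# A minimal sketch of indicator columns (illustrative only;
# `_demo_indicator_column` is a hypothetical helper, not part of the library).
# The dense output is a multi-hot vector over the wrapped column's buckets.
def _demo_indicator_column():
  colors = categorical_column_with_vocabulary_list(
      key='color', vocabulary_list=('R', 'G', 'B'))
  indicator = indicator_column(colors)
  assert indicator.variable_shape[-1] == 3  # variable_shape is [1, num_buckets]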
def _verify_static_batch_size_equality(tensors, columns):
"""Verify equality between static batch sizes.
Args:
tensors: iterable of input tensors.
columns: Corresponding feature columns.
Raises:
ValueError: in case of mismatched batch sizes.
"""
  # batch_size is a Dimension object.
  expected_batch_size = None
  for i in range(0, len(tensors)):
    batch_size = tensor_shape.Dimension(tensor_shape.dimension_value(
        tensors[i].shape[0]))
    if batch_size.value is not None:
      if expected_batch_size is None:
        batch_size_column_index = i
        expected_batch_size = batch_size
      elif not expected_batch_size.is_compatible_with(batch_size):
        raise ValueError(
            'Batch size (first dimension) of each feature must be the same. '
            'Batch size of columns ({}, {}): ({}, {})'.format(
                columns[batch_size_column_index].name, columns[i].name,
                expected_batch_size, batch_size))
class SequenceCategoricalColumn(
CategoricalColumn,
fc_old._SequenceCategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('SequenceCategoricalColumn',
('categorical_column'))):
"""Represents sequences of categorical data."""
@property
def _is_v2_column(self):
return (isinstance(self.categorical_column, FeatureColumn) and
self.categorical_column._is_v2_column) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.name
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.categorical_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.categorical_column._parse_example_spec # pylint: disable=protected-access
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class."""
return self.categorical_column.transform_feature(transformation_cache,
state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
return self.categorical_column._transform_feature(inputs) # pylint: disable=protected-access
@property
def num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.categorical_column.num_buckets
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.categorical_column._num_buckets # pylint: disable=protected-access
def _get_sparse_tensors_helper(self, sparse_tensors):
id_tensor = sparse_tensors.id_tensor
weight_tensor = sparse_tensors.weight_tensor
    # Expands the third dimension, if necessary, so that embeddings are not
    # combined during the embedding lookup. If the tensor is already 3D, it is
    # left as-is.
shape = array_ops.shape(id_tensor)
# Compute the third dimension explicitly instead of setting it to -1, as
# that doesn't work for dynamically shaped tensors with 0-length at runtime.
# This happens for empty sequences.
target_shape = [shape[0], shape[1], math_ops.reduce_prod(shape[2:])]
id_tensor = sparse_ops.sparse_reshape(id_tensor, target_shape)
if weight_tensor is not None:
weight_tensor = sparse_ops.sparse_reshape(weight_tensor, target_shape)
return CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Returns an IdWeightPair.
`IdWeightPair` is a pair of `SparseTensor`s which represents ids and
weights.
`IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
`SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
`SparseTensor` of `float` or `None` to indicate all weights should be
taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. The expected `SparseTensor` is the same as
    the parsing output of a `VarLenFeature`, which is a ragged matrix.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sparse_tensors = self.categorical_column.get_sparse_tensors(
transformation_cache, state_manager)
return self._get_sparse_tensors_helper(sparse_tensors)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self, inputs, weight_collections=None,
trainable=None):
sparse_tensors = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access
return self._get_sparse_tensors_helper(sparse_tensors)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.categorical_column]
def _get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['categorical_column'] = serialize_feature_column(
self.categorical_column)
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['categorical_column'] = deserialize_feature_column(
config['categorical_column'], custom_objects, columns_by_name)
return cls(**kwargs)
def _check_config_keys(config, expected_keys):
"""Checks that a config has all expected_keys."""
if set(config.keys()) != set(expected_keys):
raise ValueError('Invalid config: {}, expected keys: {}'.format(
config, expected_keys))
def _standardize_and_copy_config(config):
"""Returns a shallow copy of config with lists turned to tuples.
Keras serialization uses nest to listify everything.
This causes problems with the NumericColumn shape, which becomes
unhashable. We could try to solve this on the Keras side, but that
would require lots of tracking to avoid changing existing behavior.
Instead, we ensure here that we revive correctly.
Args:
config: dict that will be used to revive a Feature Column
Returns:
Shallow copy of config with lists turned to tuples.
"""
kwargs = config.copy()
for k, v in kwargs.items():
if isinstance(v, list):
kwargs[k] = tuple(v)
return kwargs
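# A minimal sketch of config standardization (illustrative only;
# `_demo_standardize_config` is a hypothetical helper, not part of the
# library). Lists coming back from Keras serialization are converted to
# hashable tuples; the input dict itself is left unmodified.
def _demo_standardize_config():
  config = {'shape': [2, 3], 'key': 'price'}
  kwargs = _standardize_and_copy_config(config)
  assert kwargs['shape'] == (2, 3)
  assert config['shape'] == [2, 3]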
|
tensorflow-master
|
tensorflow/python/feature_column/feature_column_v2.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines functions common to multiple feature column files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
def sequence_length_from_sparse_tensor(sp_tensor, num_elements=1):
"""Returns a [batch_size] Tensor with per-example sequence length."""
with ops.name_scope(None, 'sequence_length') as name_scope:
row_ids = sp_tensor.indices[:, 0]
column_ids = sp_tensor.indices[:, 1]
# Add one to convert column indices to element length
column_ids += array_ops.ones_like(column_ids)
# Get the number of elements we will have per example/row
seq_length = math_ops.segment_max(column_ids, segment_ids=row_ids)
# The raw values are grouped according to num_elements;
# how many entities will we have after grouping?
# Example: orig tensor [[1, 2], [3]], col_ids = (0, 1, 1),
# row_ids = (0, 0, 1), seq_length = [2, 1]. If num_elements = 2,
# these will get grouped, and the final seq_length is [1, 1]
seq_length = math_ops.cast(
math_ops.ceil(seq_length / num_elements), dtypes.int64)
# If the last n rows do not have ids, seq_length will have shape
# [batch_size - n]. Pad the remaining values with zeros.
n_pad = array_ops.shape(sp_tensor)[:1] - array_ops.shape(seq_length)[:1]
padding = array_ops.zeros(n_pad, dtype=seq_length.dtype)
return array_ops.concat([seq_length, padding], axis=0, name=name_scope)
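# A minimal runnable sketch of the length computation above (illustrative
# only; `_demo_sequence_length` is a hypothetical helper, not part of the
# library). For indices [[0, 0], [0, 1], [2, 0]] and batch size 3, the result
# evaluates to [2, 0, 1]: row 1 has no ids, so it gets a zero length.
def _demo_sequence_length():
  from tensorflow.python.framework import sparse_tensor  # local import
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [2, 0]], values=[7, 8, 9], dense_shape=[3, 2])
  return sequence_length_from_sparse_tensor(sp)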
def assert_string_or_int(dtype, prefix):
if (dtype != dtypes.string) and (not dtype.is_integer):
raise ValueError(
'{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))
def assert_key_is_string(key):
if not isinstance(key, six.string_types):
raise ValueError(
'key must be a string. Got: type {}. Given key: {}.'.format(
type(key), key))
def check_default_value(shape, default_value, dtype, key):
"""Returns default value as tuple if it's valid, otherwise raises errors.
This function verifies that `default_value` is compatible with both `shape`
and `dtype`. If it is not compatible, it raises an error. If it is compatible,
  it casts default_value to a tuple and returns it. `key` is used only
  for error messages.
  Args:
    shape: An iterable of integers that specifies the shape of the `Tensor`.
default_value: If a single value is provided, the same value will be applied
as the default value for every item. If an iterable of values is
provided, the shape of the `default_value` should be equal to the given
`shape`.
dtype: defines the type of values. Default value is `tf.float32`. Must be a
non-quantized, real integer or floating point type.
key: Column name, used only for error messages.
Returns:
A tuple which will be used as default value.
Raises:
    TypeError: if `default_value` is an iterable but not compatible with `shape`.
TypeError: if `default_value` is not compatible with `dtype`.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
if default_value is None:
return None
if isinstance(default_value, int):
return _create_tuple(shape, default_value)
if isinstance(default_value, float) and dtype.is_floating:
return _create_tuple(shape, default_value)
if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays
default_value = default_value.tolist()
if nest.is_sequence(default_value):
if not _is_shape_and_default_value_compatible(default_value, shape):
raise ValueError(
'The shape of default_value must be equal to given shape. '
'default_value: {}, shape: {}, key: {}'.format(
default_value, shape, key))
# Check if the values in the list are all integers or are convertible to
# floats.
is_list_all_int = all(
isinstance(v, int) for v in nest.flatten(default_value))
is_list_has_float = any(
isinstance(v, float) for v in nest.flatten(default_value))
if is_list_all_int:
return _as_tuple(default_value)
if is_list_has_float and dtype.is_floating:
return _as_tuple(default_value)
raise TypeError('default_value must be compatible with dtype. '
'default_value: {}, dtype: {}, key: {}'.format(
default_value, dtype, key))
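# A minimal sketch of default-value validation (illustrative only;
# `_demo_check_default_value` is a hypothetical helper, not part of the
# library). Scalars broadcast to the requested shape and nested lists come
# back as nested tuples.
def _demo_check_default_value():
  assert check_default_value((2,), 0, dtypes.int64, 'age') == (0, 0)
  assert check_default_value(
      (2, 2), [[1., 2.], [3., 4.]], dtypes.float32,
      'bbox') == ((1., 2.), (3., 4.))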
def _create_tuple(shape, value):
"""Returns a tuple with given shape and filled with value."""
if shape:
return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])])
return value
def _as_tuple(value):
if not nest.is_sequence(value):
return value
return tuple([_as_tuple(v) for v in value])
def _is_shape_and_default_value_compatible(default_value, shape):
"""Verifies compatibility of shape and default_value."""
# Invalid condition:
# * if default_value is not a scalar and shape is empty
# * or if default_value is an iterable and shape is not empty
if nest.is_sequence(default_value) != bool(shape):
return False
if not shape:
return True
if len(default_value) != shape[0]:
return False
for i in range(shape[0]):
if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
return False
return True
|
tensorflow-master
|
tensorflow/python/feature_column/utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely change frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
@keras_export('keras.experimental.SequenceFeatures')
class SequenceFeatures(fc._BaseFeaturesLayer):
"""A layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
  `sequence_length`. The output of this layer can be fed into sequence
  networks, such as an RNN.
  The output of this layer is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [rating, watches_embedding]
sequence_input_layer = SequenceFeatures(columns)
features = tf.io.parse_example(...,
features=make_parse_example_spec(columns))
sequence_input, sequence_length = sequence_input_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
"""
def __init__(
self,
feature_columns,
trainable=True,
name=None,
**kwargs):
""""Constructs a SequenceFeatures layer.
Args:
feature_columns: An iterable of dense sequence columns. Valid columns are
- `embedding_column` that wraps a `sequence_categorical_column_with_*`
- `sequence_numeric_column`.
trainable: Boolean, whether the layer's variables will be updated via
gradient descent during training.
name: Name to give to the SequenceFeatures.
**kwargs: Keyword arguments to construct a layer.
Raises:
ValueError: If any of the `feature_columns` is not a
`SequenceDenseColumn`.
"""
super(SequenceFeatures, self).__init__(
feature_columns=feature_columns,
trainable=trainable,
name=name,
expected_column_type=fc.SequenceDenseColumn,
**kwargs)
def _target_shape(self, input_shape, total_elements):
return (input_shape[0], input_shape[1], total_elements)
def call(self, features):
"""Returns sequence input corresponding to the `feature_columns`.
Args:
features: A dict mapping keys to tensors.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If features are not a dictionary.
"""
if not isinstance(features, dict):
      raise ValueError(
          'We expected a dictionary here. Instead we got: {}'.format(features))
transformation_cache = fc.FeatureTransformationCache(features)
output_tensors = []
sequence_lengths = []
for column in self._feature_columns:
with ops.name_scope(column.name):
dense_tensor, sequence_length = column.get_sequence_dense_tensor(
transformation_cache, self._state_manager)
# Flattens the final dimension to produce a 3D Tensor.
output_tensors.append(self._process_dense_tensor(column, dense_tensor))
sequence_lengths.append(sequence_length)
# Check and process sequence lengths.
fc._verify_static_batch_size_equality(sequence_lengths,
self._feature_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return self._verify_and_concat_tensors(output_tensors), sequence_length
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
@tf_export('feature_column.sequence_categorical_column_with_identity')
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_identity(
key=key,
num_buckets=num_buckets,
default_value=default_value))
@tf_export('feature_column.sequence_categorical_column_with_hash_bucket')
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_hash_bucket(
key=key,
hash_bucket_size=hash_bucket_size,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the number of entries in `vocabulary_file`; if it is
      smaller, later values are ignored. If `None`, it is set to the length of
      `vocabulary_file`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` cannot be specified
      together with `default_value`.
    default_value: The integer ID value to return for out-of-vocabulary
      feature values; defaults to `-1`. This cannot be specified together with
      a positive `num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_list')
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
    default_value: The integer ID value to return for out-of-vocabulary
      feature values; defaults to `-1`. This cannot be specified together with
      a positive `num_oov_buckets`.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on
      a hash of the input value. A positive `num_oov_buckets` cannot be
      specified together with `default_value`.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
@tf_export('feature_column.sequence_numeric_column')
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
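  A `normalizer_fn` can rescale values after parsing; a minimal sketch (the
  Fahrenheit-to-Celsius conversion below is illustrative):
  ```python
  temperature = sequence_numeric_column(
      'temperature', normalizer_fn=lambda x: (x - 32.0) / 1.8)
  ```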
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
    normalizer_fn: If not `None`, a function that can be used to normalize the
      value of the tensor after `default_value` is applied for parsing. The
      normalizer function takes the input `Tensor` as its argument and returns
      the output `Tensor` (e.g. `lambda x: (x - 3.0) / 4.2`). Note that even
      though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
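    # Return under a control dependency so the equality assertions are
    # evaluated (in graph mode) before the first tensor is consumed.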
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
class SequenceNumericColumn(
fc.SequenceDenseColumn,
collections.namedtuple(
'SequenceNumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""Represents sequences of numeric data."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of sequence input."""
return tensor_shape.TensorShape(self.shape)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sp_tensor = transformation_cache.get(self, state_manager)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
    # Get the number of timesteps per example.
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
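    # E.g. with shape=(2,), a 2D input carrying 6 values for one example is
    # grouped into 3 timesteps of 2 elements each (sequence length 3).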
if sp_tensor.shape.ndims == 2:
num_elements = self.variable_shape.num_elements()
else:
num_elements = 1
seq_length = fc_utils.sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
return fc.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def _get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['normalizer_fn'] = utils.serialize_keras_object(self.normalizer_fn)
config['dtype'] = self.dtype.name
return config
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
fc._check_config_keys(config, cls._fields)
kwargs = config.copy()
kwargs['normalizer_fn'] = utils.deserialize_keras_object(
config['normalizer_fn'], custom_objects=custom_objects)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
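# Round-trip sketch for the config helpers above (a hypothetical usage,
# assuming a column without a custom normalizer):
#   col = sequence_numeric_column('temperature')
#   restored = SequenceNumericColumn._from_config(col._get_config())
#   assert col == restored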
# pylint: enable=protected-access
|
tensorflow-master
|
tensorflow/python/feature_column/sequence_feature_column.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_column and DenseFeatures serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import serialization
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FeatureColumnSerializationTest(test.TestCase):
"""Tests for serialization, deserialization helpers."""
def test_serialize_non_feature_column(self):
class NotAFeatureColumn(object):
pass
with self.assertRaisesRegexp(ValueError, 'is not a FeatureColumn'):
serialization.serialize_feature_column(NotAFeatureColumn())
def test_deserialize_invalid_config(self):
with self.assertRaisesRegexp(ValueError, 'Improper config format: {}'):
serialization.deserialize_feature_column({})
def test_deserialize_config_missing_key(self):
config_missing_key = {
'config': {
# Dtype is missing and should cause a failure.
# 'dtype': 'int32',
'default_value': None,
'key': 'a',
'normalizer_fn': None,
'shape': (2,)
},
'class_name': 'NumericColumn'
}
with self.assertRaisesRegexp(
ValueError, 'Invalid config:.*expected keys.*dtype'):
serialization.deserialize_feature_column(config_missing_key)
def test_deserialize_invalid_class(self):
with self.assertRaisesRegexp(
ValueError, 'Unknown feature_column_v2: NotExistingFeatureColumnClass'):
serialization.deserialize_feature_column({
'class_name': 'NotExistingFeatureColumnClass',
'config': {}
})
def test_deserialization_deduping(self):
price = fc.numeric_column('price')
bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
configs = serialization.serialize_feature_columns([price, bucketized_price])
deserialized_feature_columns = serialization.deserialize_feature_columns(
configs)
self.assertLen(deserialized_feature_columns, 2)
new_price = deserialized_feature_columns[0]
new_bucketized_price = deserialized_feature_columns[1]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
self.assertIsNot(bucketized_price, new_bucketized_price)
# But they are equivalent:
self.assertEqual(price, new_price)
self.assertEqual(bucketized_price, new_bucketized_price)
# Check that deduping worked:
self.assertIs(new_bucketized_price.source_column, new_price)
  def test_deserialization_custom_objects(self):
    # Note that custom_objects is also tested extensively above per class;
    # this test ensures that the public wrappers also handle it correctly.
def _custom_fn(input_tensor):
return input_tensor + 42.
price = fc.numeric_column('price', normalizer_fn=_custom_fn)
configs = serialization.serialize_feature_columns([price])
deserialized_feature_columns = serialization.deserialize_feature_columns(
configs)
self.assertLen(deserialized_feature_columns, 1)
new_price = deserialized_feature_columns[0]
# Ensure these are not the original objects:
self.assertIsNot(price, new_price)
# But they are equivalent:
self.assertEqual(price, new_price)
# Check that normalizer_fn points to the correct function.
self.assertIs(new_price.normalizer_fn, _custom_fn)
@test_util.run_all_in_graph_and_eager_modes
class DenseFeaturesSerializationTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('default', None, None),
('trainable', True, 'trainable'),
('not_trainable', False, 'frozen'))
def test_get_config(self, trainable, name):
cols = [fc.numeric_column('a'),
fc.embedding_column(fc.categorical_column_with_identity(
key='b', num_buckets=3), dimension=2)]
orig_layer = fc.DenseFeatures(cols, trainable=trainable, name=name)
config = orig_layer.get_config()
self.assertEqual(config['name'], orig_layer.name)
self.assertEqual(config['trainable'], trainable)
self.assertLen(config['feature_columns'], 2)
self.assertEqual(
config['feature_columns'][0]['class_name'], 'NumericColumn')
self.assertEqual(config['feature_columns'][0]['config']['shape'], (1,))
self.assertEqual(
config['feature_columns'][1]['class_name'], 'EmbeddingColumn')
@parameterized.named_parameters(
('default', None, None),
('trainable', True, 'trainable'),
('not_trainable', False, 'frozen'))
def test_from_config(self, trainable, name):
cols = [fc.numeric_column('a'),
fc.embedding_column(fc.categorical_column_with_vocabulary_list(
'b', vocabulary_list=['1', '2', '3']), dimension=2),
fc.indicator_column(fc.categorical_column_with_hash_bucket(
key='c', hash_bucket_size=3))]
orig_layer = fc.DenseFeatures(cols, trainable=trainable, name=name)
config = orig_layer.get_config()
new_layer = fc.DenseFeatures.from_config(config)
self.assertEqual(new_layer.name, orig_layer.name)
self.assertEqual(new_layer.trainable, trainable)
self.assertLen(new_layer._feature_columns, 3)
self.assertEqual(new_layer._feature_columns[0].name, 'a')
self.assertEqual(new_layer._feature_columns[1].initializer.mean, 0.0)
self.assertEqual(new_layer._feature_columns[1].categorical_column.name, 'b')
self.assertIsInstance(new_layer._feature_columns[2], fc.IndicatorColumn)
@test_util.run_all_in_graph_and_eager_modes
class LinearModelLayerSerializationTest(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('default', 1, 'sum', None, None),
('trainable', 6, 'mean', True, 'trainable'),
('not_trainable', 10, 'sum', False, 'frozen'))
def test_get_config(self, units, sparse_combiner, trainable, name):
cols = [fc.numeric_column('a'),
fc.categorical_column_with_identity(key='b', num_buckets=3)]
layer = fc._LinearModelLayer(
cols, units=units, sparse_combiner=sparse_combiner,
trainable=trainable, name=name)
config = layer.get_config()
self.assertEqual(config['name'], layer.name)
self.assertEqual(config['trainable'], trainable)
self.assertEqual(config['units'], units)
self.assertEqual(config['sparse_combiner'], sparse_combiner)
self.assertLen(config['feature_columns'], 2)
self.assertEqual(
config['feature_columns'][0]['class_name'], 'NumericColumn')
self.assertEqual(
config['feature_columns'][1]['class_name'], 'IdentityCategoricalColumn')
@parameterized.named_parameters(
('default', 1, 'sum', None, None),
('trainable', 6, 'mean', True, 'trainable'),
('not_trainable', 10, 'sum', False, 'frozen'))
def test_from_config(self, units, sparse_combiner, trainable, name):
cols = [fc.numeric_column('a'),
fc.categorical_column_with_vocabulary_list(
'b', vocabulary_list=('1', '2', '3')),
fc.categorical_column_with_hash_bucket(
key='c', hash_bucket_size=3)]
orig_layer = fc._LinearModelLayer(
cols, units=units, sparse_combiner=sparse_combiner,
trainable=trainable, name=name)
config = orig_layer.get_config()
new_layer = fc._LinearModelLayer.from_config(config)
self.assertEqual(new_layer.name, orig_layer.name)
self.assertEqual(new_layer._units, units)
self.assertEqual(new_layer._sparse_combiner, sparse_combiner)
self.assertEqual(new_layer.trainable, trainable)
self.assertLen(new_layer._feature_columns, 3)
self.assertEqual(new_layer._feature_columns[0].name, 'a')
self.assertEqual(
new_layer._feature_columns[1].vocabulary_list, ('1', '2', '3'))
self.assertEqual(new_layer._feature_columns[2].num_buckets, 3)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/feature_column/serialization_test.py
|