# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for feature_column."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import copy

import numpy as np

from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import rmsprop
from tensorflow_estimator.python.estimator.inputs import numpy_io


def _initialized_session(config=None):
  sess = session.Session(config=config)
  sess.run(variables_lib.global_variables_initializer())
  sess.run(lookup_ops.tables_initializer())
  return sess


def get_linear_model_bias(name='linear_model'):
  with variable_scope.variable_scope(name, reuse=True):
    return variable_scope.get_variable('bias_weights')


def get_linear_model_column_var(column, name='linear_model'):
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                            name + '/' + column.name)[0]


class BaseFeatureColumnForTests(fc.FeatureColumn):
  """A base FeatureColumn useful to avoid boiler-plate in tests.

  Provides dummy implementations for abstract methods that raise ValueError
  in order to avoid re-defining all abstract methods for each test sub-class.
  """

  @property
  def parents(self):
    raise ValueError('Should not use this method.')

  @classmethod
  def _from_config(cls, config, custom_objects=None, columns_by_name=None):
    raise ValueError('Should not use this method.')

  def _get_config(self):
    raise ValueError('Should not use this method.')

class LazyColumnTest(test.TestCase):

  def test_transformations_called_once(self):

    class TransformCounter(BaseFeatureColumnForTests):

      def __init__(self):
        self.num_transform = 0

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'TransformCounter'

      def transform_feature(self, transformation_cache, state_manager):
        self.num_transform += 1  # Count transform calls.
        return transformation_cache.get('a', state_manager)

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    column = TransformCounter()
    self.assertEqual(0, column.num_transform)
    transformation_cache.get(column, None)
    self.assertEqual(1, column.num_transform)
    transformation_cache.get(column, None)
    self.assertEqual(1, column.num_transform)

  def test_returns_transform_output(self):

    class Transformer(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'Transformer'

      def transform_feature(self, transformation_cache, state_manager):
        return 'Output'

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    column = Transformer()
    self.assertEqual('Output', transformation_cache.get(column, None))
    self.assertEqual('Output', transformation_cache.get(column, None))

  def test_does_not_pollute_given_features_dict(self):

    class Transformer(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'Transformer'

      def transform_feature(self, transformation_cache, state_manager):
        return 'Output'

      @property
      def parse_example_spec(self):
        pass

    features = {'a': [[2], [3.]]}
    transformation_cache = fc.FeatureTransformationCache(features=features)
    transformation_cache.get(Transformer(), None)
    self.assertEqual(['a'], list(features.keys()))

  def test_error_if_feature_is_not_found(self):
    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      transformation_cache.get('bbb', None)
    with self.assertRaisesRegexp(ValueError,
                                 'bbb is not in features dictionary'):
      transformation_cache.get(u'bbb', None)

  def test_not_supported_feature_column(self):

    class NotAProperColumn(BaseFeatureColumnForTests):

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'NotAProperColumn'

      def transform_feature(self, transformation_cache, state_manager):
        # Invalid transform output: should return something other than None.
        pass

      @property
      def parse_example_spec(self):
        pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(ValueError,
                                 'NotAProperColumn is not supported'):
      transformation_cache.get(NotAProperColumn(), None)

  def test_key_should_be_string_or_feature_column(self):

    class NotAFeatureColumn(object):
      pass

    transformation_cache = fc.FeatureTransformationCache(
        features={'a': [[2], [3.]]})
    with self.assertRaisesRegexp(
        TypeError, '"key" must be either a "str" or "FeatureColumn".'):
      transformation_cache.get(NotAFeatureColumn(), None)

  @test_util.run_deprecated_v1
  def test_expand_dim_rank_1_sparse_tensor_empty_batch(self):
    # empty 1-D sparse tensor:
    transformation_cache = fc.FeatureTransformationCache(
        features={
            'a':
                sparse_tensor.SparseTensor(
                    indices=np.reshape(np.array([], dtype=np.int64), (0, 1)),
                    dense_shape=[0],
                    values=np.array([]))
        })
    spv = self.evaluate(transformation_cache.get('a', None))
    self.assertAllEqual(np.array([0, 1], dtype=np.int64), spv.dense_shape)
    self.assertAllEqual(
        np.reshape(np.array([], dtype=np.int64), (0, 2)), spv.indices)


class NumericColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    a = fc.numeric_column('aaa')
    self.assertEqual('aaa', a.key)
    self.assertEqual('aaa', a.name)
    self.assertEqual((1,), a.shape)
    self.assertIsNone(a.default_value)
    self.assertEqual(dtypes.float32, a.dtype)
    self.assertIsNone(a.normalizer_fn)
    self.assertTrue(a._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.numeric_column(key=('aaa',))

  def test_shape_saved_as_tuple(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
    self.assertEqual((1, 2), a.shape)

  def test_default_value_saved_as_tuple(self):
    a = fc.numeric_column('aaa', default_value=4.)
    self.assertEqual((4.,), a.default_value)
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]])
    self.assertEqual(((3., 2.),), a.default_value)

  def test_shape_and_default_value_compatibility(self):
    fc.numeric_column('aaa', shape=[2], default_value=[1, 2.])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column('aaa', shape=[2], default_value=[1, 2, 3.])
    fc.numeric_column(
        'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column(
          'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]])
    with self.assertRaisesRegexp(ValueError, 'The shape of default_value'):
      fc.numeric_column(
          'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]])

  def test_default_value_type_check(self):
    fc.numeric_column(
        'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32)
    fc.numeric_column(
        'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32)
    with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'):
      fc.numeric_column(
          'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32)
    with self.assertRaisesRegexp(TypeError,
                                 'default_value must be compatible with dtype'):
      fc.numeric_column('aaa', default_value=['string'])

  def test_shape_must_be_positive_integer(self):
    with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'):
      fc.numeric_column('aaa', shape=[1.0])
    with self.assertRaisesRegexp(ValueError,
                                 'shape dimensions must be greater than 0'):
      fc.numeric_column('aaa', shape=[0])

  def test_dtype_is_convertible_to_float(self):
    with self.assertRaisesRegexp(ValueError,
                                 'dtype must be convertible to float'):
      fc.numeric_column('aaa', dtype=dtypes.string)

  def test_scalar_default_value_fills_the_shape(self):
    a = fc.numeric_column('aaa', shape=[2, 3], default_value=2.)
    self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value)

  def test_parse_spec(self):
    a = fc.numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32)
    }, a.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_parse_example_no_default_value(self):
    price = fc.numeric_column('price', shape=[2])
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([price]))
    self.assertIn('price', features)
    self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))

  @test_util.run_deprecated_v1
  def test_parse_example_with_default_value(self):
    price = fc.numeric_column('price', shape=[2], default_value=11.)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.]))
            }))
    no_data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'something_else':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString(),
                    no_data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([price]))
    self.assertIn('price', features)
    self.assertAllEqual([[20., 110.], [11., 11.]],
                        self.evaluate(features['price']))

  def test_normalizer_fn_must_be_callable(self):
    with self.assertRaisesRegexp(TypeError, 'must be a callable'):
      fc.numeric_column('price', normalizer_fn='NotACallable')

  @test_util.run_deprecated_v1
  def test_normalizer_fn_transform_feature(self):

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    output = fc._transform_features_v2({
        'price': [[1., 2.], [5., 6.]]
    }, [price], None)
    self.assertAllEqual([[3., 4.], [7., 8.]], self.evaluate(output[price]))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor(self):

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', shape=[2], normalizer_fn=_increment_two)
    transformation_cache = fc.FeatureTransformationCache({
        'price': [[1., 2.], [5., 6.]]
    })
    self.assertEqual(
        transformation_cache.get(price, None),
        price.get_dense_tensor(transformation_cache, None))

  def test_sparse_tensor_not_supported(self):
    price = fc.numeric_column('price')
    transformation_cache = fc.FeatureTransformationCache({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      price.transform_feature(transformation_cache, None)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]])
    a_copy = copy.deepcopy(a)
    self.assertEqual(a_copy.name, 'aaa')
    self.assertEqual(a_copy.shape, (1, 2))
    self.assertEqual(a_copy.default_value, ((3., 2.),))

  def test_numpy_default_value(self):
    a = fc.numeric_column(
        'aaa', shape=[1, 2], default_value=np.array([[3., 2.]]))
    self.assertEqual(a.default_value, ((3., 2.),))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      model = fc.LinearModel([price])
      predictions = model(features)
      price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[10.], [50.]], self.evaluate(predictions))

  def test_old_linear_model(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc_old.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[10.], [50.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):

    def _increment_two(input_tensor):
      return input_tensor + 2.

    price = fc.numeric_column('price', normalizer_fn=_increment_two)
    self.assertEqual(['price'], price.parents)

    config = price._get_config()
    self.assertEqual({
        'key': 'price',
        'shape': (1,),
        'default_value': None,
        'dtype': 'float32',
        'normalizer_fn': '_increment_two'
    }, config)

    new_col = fc.NumericColumn._from_config(
        config, custom_objects={'_increment_two': _increment_two})
    self.assertEqual(price, new_col)
    self.assertEqual(new_col.shape, (1,))
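
# bucketized_column buckets a one-dimensional numeric_column against sorted
# boundaries: len(boundaries) + 1 buckets per source value, so a source column
# of shape [k] yields num_buckets == k * (len(boundaries) + 1). A rough sketch
# (illustrative values only), mirroring test_num_buckets below:
#
#   price = fc.numeric_column('price', shape=[2])
#   b = fc.bucketized_column(price, boundaries=[0, 1])
#   # 3 buckets per value * 2 values -> b.num_buckets == 6
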

class BucketizedColumnTest(test.TestCase):

  def test_invalid_source_column_type(self):
    a = fc.categorical_column_with_hash_bucket('aaa', hash_bucket_size=10)
    with self.assertRaisesRegexp(
        ValueError,
        'source_column must be a column generated with numeric_column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_source_column_shape(self):
    a = fc.numeric_column('aaa', shape=[2, 3])
    with self.assertRaisesRegexp(
        ValueError, 'source_column must be one-dimensional column'):
      fc.bucketized_column(a, boundaries=[0, 1])

  def test_invalid_boundaries(self):
    a = fc.numeric_column('aaa')
    with self.assertRaisesRegexp(ValueError, 'boundaries must not be empty'):
      fc.bucketized_column(a, boundaries=None)
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=1.)
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 0])
    with self.assertRaisesRegexp(ValueError,
                                 'boundaries must be a sorted list'):
      fc.bucketized_column(a, boundaries=[1, 1])

  def test_name(self):
    a = fc.numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertTrue(b._is_v2_column)
    self.assertEqual('aaa_bucketized', b.name)

  def test_is_v2_column_old_numeric(self):
    a = fc_old._numeric_column('aaa', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertFalse(b._is_v2_column)
    self.assertEqual('aaa_bucketized', b.name)

  def test_parse_spec(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    self.assertEqual({
        'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32)
    }, b.parse_example_spec)

  def test_variable_shape(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> variable_shape=[2, 3].
    self.assertAllEqual((2, 3), b.variable_shape)

  def test_num_buckets(self):
    a = fc.numeric_column('aaa', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    # Column 'aaa' has shape [2] times three buckets -> num_buckets=6.
    self.assertEqual(6, b.num_buckets)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([bucketized_price]))
    self.assertIn('price', features)
    self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformed_tensor = fc._transform_features_v2({
          'price': [[-1., 1.], [5., 6.]]
      }, [bucketized_price], None)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllEqual([[0, 1], [3, 4]],
                          self.evaluate(transformed_tensor[bucketized_price]))

  def test_get_dense_tensor_one_input_value(self):
    """Tests _get_dense_tensor() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1.], [1.], [5.], [6.]]
      })
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      bucketized_price_tensor = bucketized_price.get_dense_tensor(
          transformation_cache, None)
      self.assertAllClose(  # One-hot tensor.
          [[[1., 0., 0., 0., 0.]], [[0., 1., 0., 0., 0.]],
           [[0., 0., 0., 1., 0.]], [[0., 0., 0., 0., 1.]]],
          self.evaluate(bucketized_price_tensor))

  def test_get_dense_tensor_two_input_values(self):
    """Tests _get_dense_tensor() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1., 1.], [5., 6.]]
      })
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      bucketized_price_tensor = bucketized_price.get_dense_tensor(
          transformation_cache, None)
      self.assertAllClose(  # One-hot tensor.
          [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]],
           [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]],
          self.evaluate(bucketized_price_tensor))

  def test_get_sparse_tensors_one_input_value(self):
    """Tests _get_sparse_tensors() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1.], [1.], [5.], [6.]]
      })
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price.get_sparse_tensors(
            transformation_cache, None)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual([[0, 0], [1, 0], [2, 0], [3, 0]],
                            id_tensor_value.indices)
        self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values)
        self.assertAllEqual([4, 1], id_tensor_value.dense_shape)

  def test_get_sparse_tensors_two_input_values(self):
    """Tests _get_sparse_tensors() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'price': [[-1., 1.], [5., 6.]]
      })
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price.get_sparse_tensors(
            transformation_cache, None)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1]],
                            id_tensor_value.indices)
        # Values 0-4 correspond to the first column of the input price.
        # Values 5-9 correspond to the second column of the input price.
        self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
        self.assertAllEqual([2, 2], id_tensor_value.dense_shape)

  def test_sparse_tensor_input_not_supported(self):
    price = fc.numeric_column('price')
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 1])
    transformation_cache = fc.FeatureTransformationCache({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      bucketized_price.transform_feature(transformation_cache, None)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.numeric_column('aaa', shape=[2])
    a_bucketized = fc.bucketized_column(a, boundaries=[0, 1])
    a_bucketized_copy = copy.deepcopy(a_bucketized)
    self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
    self.assertAllEqual(a_bucketized_copy.variable_shape, (2, 3))
    self.assertEqual(a_bucketized_copy.boundaries, (0, 1))

  def test_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      model = fc.LinearModel([bucketized_price])
      predictions = model(features)
      bucketized_price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  def test_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      model = fc.LinearModel([bucketized_price])
      predictions = model(features)
      bucketized_price_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))

  def test_old_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc.numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  def test_old_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))

  def test_old_linear_model_one_input_value_old_numeric(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc_old._numeric_column('price', shape=[1])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc_old.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 2, 4, 6])
    self.assertEqual([price], bucketized_price.parents)

    config = bucketized_price._get_config()
    self.assertEqual({
        'source_column': {
            'class_name': 'NumericColumn',
            'config': {
                'key': 'price',
                'shape': (2,),
                'default_value': None,
                'dtype': 'float32',
                'normalizer_fn': None
            }
        },
        'boundaries': (0, 2, 4, 6)
    }, config)

    new_bucketized_price = fc.BucketizedColumn._from_config(config)
    self.assertEqual(bucketized_price, new_bucketized_price)
    self.assertIsNot(price, new_bucketized_price.source_column)

    new_bucketized_price = fc.BucketizedColumn._from_config(
        config, columns_by_name={price.name: price})
    self.assertEqual(bucketized_price, new_bucketized_price)
    self.assertIs(price, new_bucketized_price.source_column)
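
# categorical_column_with_hash_bucket maps each string (or integer) input to
# an id via a deterministic fingerprint modulo hash_bucket_size; distinct
# values may collide. A rough sketch (illustrative values only):
#
#   wire = fc.categorical_column_with_hash_bucket('wire', hash_bucket_size=10)
#   # every 'wire' value lands in [0, 10); the exact ids are hash-dependent,
#   # which is why the tests below pin down "exact hashed output".
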

class HashedCategoricalColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual('aaa', a.name)
    self.assertEqual('aaa', a.key)
    self.assertEqual(10, a.hash_bucket_size)
    self.assertEqual(dtypes.string, a.dtype)
    self.assertTrue(a._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_hash_bucket(('key',), 10)

  def test_bucket_size_should_be_given(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
      fc.categorical_column_with_hash_bucket('aaa', None)

  def test_bucket_size_should_be_positive(self):
    with self.assertRaisesRegexp(ValueError,
                                 'hash_bucket_size must be at least 1'):
      fc.categorical_column_with_hash_bucket('aaa', 0)

  def test_dtype_should_be_string_or_integer(self):
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
    fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc.categorical_column_with_hash_bucket('aaa', 10)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(10, column.hash_bucket_size)
      self.assertEqual(10, column.num_buckets)
      self.assertEqual(dtypes.string, column.dtype)

  def test_parse_spec_string(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, a.parse_example_spec)

  def test_parse_spec_int(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, a.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc.categorical_column_with_hash_bucket('aaa', 10)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer']))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a]))
    self.assertIn('aaa', features)
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([b'omar', b'stringer'], dtype=np.object_),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_strings_should_be_hashed(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    outputs = fc._transform_features_v2({
        'wire': wire_tensor
    }, [hashed_sparse], None)
    output = outputs[hashed_sparse]
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [6, 4, 1]
    self.assertEqual(dtypes.int64, output.values.dtype)
    self.assertAllEqual(expected_values, self.evaluate(output.values))
    self.assertAllEqual(
        self.evaluate(wire_tensor.indices), self.evaluate(output.indices))
    self.assertAllEqual(
        self.evaluate(wire_tensor.dense_shape),
        self.evaluate(output.dense_shape))

  def test_tensor_dtype_should_be_string_or_integer(self):
    string_fc = fc.categorical_column_with_hash_bucket(
        'a_string', 10, dtype=dtypes.string)
    int_fc = fc.categorical_column_with_hash_bucket(
        'a_int', 10, dtype=dtypes.int32)
    float_fc = fc.categorical_column_with_hash_bucket(
        'a_float', 10, dtype=dtypes.string)
    int_tensor = sparse_tensor.SparseTensor(
        values=[101], indices=[[0, 0]], dense_shape=[1, 1])
    string_tensor = sparse_tensor.SparseTensor(
        values=['101'], indices=[[0, 0]], dense_shape=[1, 1])
    float_tensor = sparse_tensor.SparseTensor(
        values=[101.], indices=[[0, 0]], dense_shape=[1, 1])
    transformation_cache = fc.FeatureTransformationCache({
        'a_int': int_tensor,
        'a_string': string_tensor,
        'a_float': float_tensor
    })
    transformation_cache.get(string_fc, None)
    transformation_cache.get(int_fc, None)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      transformation_cache.get(float_fc, None)

  def test_dtype_should_match_with_tensor(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
    transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      transformation_cache.get(hashed_sparse, None)

  @test_util.run_deprecated_v1
  def test_ints_should_be_hashed(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[101, 201, 301],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
    output = transformation_cache.get(hashed_sparse, None)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    self.assertAllEqual(expected_values, self.evaluate(output.values))

  @test_util.run_deprecated_v1
  def test_int32_64_is_compatible(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    transformation_cache = fc.FeatureTransformationCache({'wire': wire_tensor})
    output = transformation_cache.get(hashed_sparse, None)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    self.assertAllEqual(expected_values, self.evaluate(output.values))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    transformation_cache = fc.FeatureTransformationCache({
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2])
    })
    id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
                                                      None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(
        transformation_cache.get(hashed_sparse, None),
        id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    transformation_cache = fc.FeatureTransformationCache({
        'wire': (('omar', ''), ('stringer', 'marlo'))
    })
    id_weight_pair = hashed_sparse.get_sparse_tensors(transformation_cache,
                                                      None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(
        transformation_cache.get(hashed_sparse, None),
        id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((wire_column,))
      predictions = model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      })
      wire_var, bias = model.variables
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 3: wire_var[3] = 4
      # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
      self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 3: wire_var[3] = 4
      # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
      self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    wire_column = fc.categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(['wire'], wire_column.parents)

    config = wire_column._get_config()
    self.assertEqual({
        'key': 'wire',
        'hash_bucket_size': 4,
        'dtype': 'string'
    }, config)

    self.assertEqual(wire_column,
                     fc.HashedCategoricalColumn._from_config(config))
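
# crossed_column hashes the Cartesian product of its keys' values into
# hash_bucket_size buckets (optionally seeded with hash_key), so crossed ids
# are hash-dependent like the hashed column above. A rough sketch
# (illustrative names and values only):
#
#   b = fc.bucketized_column(fc.numeric_column('a'), boundaries=[0, 1])
#   crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5)
#   # each (bucket-of-a, value-of-c) pair maps to one of 5 crossed ids
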

class CrossedColumnTest(test.TestCase):

  def test_keys_empty(self):
    with self.assertRaisesRegexp(ValueError,
                                 'keys must be a list with length > 1'):
      fc.crossed_column([], 10)

  def test_keys_length_one(self):
    with self.assertRaisesRegexp(ValueError,
                                 'keys must be a list with length > 1'):
      fc.crossed_column(['a'], 10)

  def test_key_type_unsupported(self):
    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
      fc.crossed_column(['a', fc.numeric_column('c')], 10)

    with self.assertRaisesRegexp(
        ValueError, 'categorical_column_with_hash_bucket is not supported'):
      fc.crossed_column(
          ['a', fc.categorical_column_with_hash_bucket('c', 10)], 10)

  def test_hash_bucket_size_negative(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], -1)

  def test_hash_bucket_size_zero(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], 0)

  def test_hash_bucket_size_none(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be > 1'):
      fc.crossed_column(['a', 'c'], None)

  def test_name(self):
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    self.assertTrue(crossed1._is_v2_column)

    crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
    self.assertTrue(crossed2._is_v2_column)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_is_v2_column(self):
    a = fc_old._numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    self.assertTrue(crossed1._is_v2_column)

    crossed2 = fc.crossed_column([b, 'c', crossed1], 10)
    self.assertFalse(crossed2._is_v2_column)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([crossed1, 'c', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_leaf_keys_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d2', 'c'], 10)
    crossed2 = fc.crossed_column([crossed1, 'd1', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_parse_spec(self):
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 10)
    self.assertEqual({
        'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
        'c': parsing_ops.VarLenFeature(dtypes.string),
    }, crossed.parse_example_spec)

  def test_num_buckets(self):
    a = fc.numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed = fc.crossed_column([b, 'c'], 15)
    self.assertEqual(15, crossed.num_buckets)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.numeric_column('a', dtype=dtypes.int32)
    b = fc.bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    crossed2_copy = copy.deepcopy(crossed2)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name)
    self.assertEqual(15, crossed2_copy.hash_bucket_size)
    self.assertEqual(5, crossed2_copy.hash_key)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'], 10)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'price':
                    feature_pb2.Feature(
                        float_list=feature_pb2.FloatList(value=[20., 110.])),
                'wire':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer'])),
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([price_cross_wire]))
    self.assertIn('price', features)
    self.assertIn('wire', features)
    self.assertAllEqual([[20., 110.]], self.evaluate(features['price']))
    wire_sparse = features['wire']
    self.assertAllEqual([[0, 0], [0, 1]], self.evaluate(wire_sparse.indices))
    # Use byte constants to pass the open-source test.
    self.assertAllEqual([b'omar', b'stringer'],
                        self.evaluate(wire_sparse.values))
    self.assertAllEqual([1, 2], self.evaluate(wire_sparse.dense_shape))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    price = fc.numeric_column('price', shape=[2])
    bucketized_price = fc.bucketized_column(price, boundaries=[0, 50])
    hash_bucket_size = 10
    price_cross_wire = fc.crossed_column([bucketized_price, 'wire'],
                                         hash_bucket_size)
    features = {
        'price':
            constant_op.constant([[1., 2.], [5., 6.]]),
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2]),
    }
    outputs = fc._transform_features_v2(features, [price_cross_wire], None)
    output = outputs[price_cross_wire]
    output_val = self.evaluate(output)
    self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]],
                        output_val.indices)
    for val in output_val.values:
      self.assertIn(val, list(range(hash_bucket_size)))
    self.assertAllEqual([2, 4], output_val.dense_shape)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed1 = fc.crossed_column(['d1', 'd2'], 10)
    crossed2 = fc.crossed_column([b, 'c', crossed1], 15, hash_key=5)
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
          'd1':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d1A', 'd1B', 'd1C'],
                  dense_shape=(2, 2)),
          'd2':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d2A', 'd2B', 'd2C'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed2.get_sparse_tensors(transformation_cache, None)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
      self.assertAllEqual(((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3),
                           (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9),
                           (1, 10), (1, 11), (1, 12), (1, 13), (1, 14),
                           (1, 15)), id_tensor_eval.indices)
      # Check exact hashed output. If hashing changes this test will break.
      # All values are within [0, hash_bucket_size).
      expected_values = (6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0,
                         10, 11)
      self.assertAllEqual(expected_values, id_tensor_eval.values)
      self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)

  def test_get_sparse_tensors_simple(self):
    """Same as test_get_sparse_tensors, but with simpler values."""
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      transformation_cache = fc.FeatureTransformationCache({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed.get_sparse_tensors(transformation_cache, None)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      id_tensor_eval = self.evaluate(id_weight_pair.id_tensor)
      self.assertAllEqual(((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
                          id_tensor_eval.indices)
      # Check exact hashed output. If hashing changes this test will break.
      # All values are within [0, hash_bucket_size).
      expected_values = (1, 0, 1, 3, 4, 2)
      self.assertAllEqual(expected_values, id_tensor_eval.values)
      self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      model = fc.LinearModel((crossed,))
      predictions = model({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      crossed_var, bias = model.variables
      with _initialized_session() as sess:
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(crossed_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))

  def test_linear_model_with_weights(self):

    class _TestColumnWithWeights(BaseFeatureColumnForTests,
                                 fc.CategoricalColumn):
      """Produces sparse IDs and sparse weights."""

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'test_column'

      @property
      def parse_example_spec(self):
        return {
            self.name:
                parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name):
                parsing_ops.VarLenFeature(dtypes.float32),
        }

      @property
      def num_buckets(self):
        return 5

      def transform_feature(self, transformation_cache, state_manager):
        return (transformation_cache.get(self.name, state_manager),
                transformation_cache.get('{}_weights'.format(self.name),
                                         state_manager))

      def get_sparse_tensors(self, transformation_cache, state_manager):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = transformation_cache.get(self, state_manager)
        return fc.CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])

    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        model = fc.LinearModel((crossed,))
        model({
            t.name:
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[0, 1, 2],
                    dense_shape=(2, 2)),
            '{}_weights'.format(t.name):
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[1., 10., 2.],
                    dense_shape=(2, 2)),
            'c':
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=['cA', 'cB', 'cC'],
                    dense_shape=(2, 2)),
        })

  def test_old_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(crossed_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))

  def test_old_linear_model_with_weights(self):

    class _TestColumnWithWeights(BaseFeatureColumnForTests,
                                 fc.CategoricalColumn,
                                 fc_old._CategoricalColumn):
      """Produces sparse IDs and sparse weights."""

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return 'test_column'

      @property
      def parse_example_spec(self):
        return {
            self.name:
                parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name):
                parsing_ops.VarLenFeature(dtypes.float32),
        }

      @property
      def _parse_example_spec(self):
        return self.parse_example_spec

      @property
      def num_buckets(self):
        return 5

      @property
      def _num_buckets(self):
        return self.num_buckets

      def transform_feature(self, transformation_cache, state_manager):
        raise ValueError('Should not be called.')

      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))

      def get_sparse_tensors(self, transformation_cache, state_manager):
        raise ValueError('Should not be called.')

      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return fc.CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])

    t = _TestColumnWithWeights()
    crossed = fc.crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        fc_old.linear_model({
            t.name:
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[0, 1, 2],
                    dense_shape=(2, 2)),
            '{}_weights'.format(t.name):
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[1., 10., 2.],
                    dense_shape=(2, 2)),
            'c':
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=['cA', 'cB', 'cC'],
                    dense_shape=(2, 2)),
        }, (crossed,))

  def test_old_linear_model_old_numeric(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc_old._numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(crossed_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc.bucketized_column(a, boundaries=(0, 1))
    crossed = fc.crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)

    self.assertEqual([b, 'c'], crossed.parents)

    config = crossed._get_config()
    self.assertEqual({
        'hash_bucket_size': 5,
        'hash_key': 5,
        'keys': ({
            'config': {
                'boundaries': (0, 1),
                'source_column': {
                    'config': {
                        'dtype': 'int32',
                        'default_value': None,
                        'key': 'a',
                        'normalizer_fn': None,
                        'shape': (2,)
                    },
                    'class_name': 'NumericColumn'
                }
            },
            'class_name': 'BucketizedColumn'
        }, 'c')
    }, config)

    new_crossed = fc.CrossedColumn._from_config(config)
    self.assertEqual(crossed, new_crossed)
    self.assertIsNot(b, new_crossed.keys[0])

    new_crossed = fc.CrossedColumn._from_config(
        config, columns_by_name={b.name: b})
    self.assertEqual(crossed, new_crossed)
    self.assertIs(b, new_crossed.keys[0])
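
# fc.LinearModel computes sum_i(w_i * x_i) + bias over its feature columns:
# dense columns contribute weight * value, while categorical columns look up
# one weight per id and reduce them with sparse_combiner. A rough sketch
# (illustrative values only):
#
#   model = fc.LinearModel([fc.numeric_column('price')])
#   predictions = model({'price': [[1.], [5.]]})
#   price_var, bias = model.variables  # both initialized to zero
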
model(features) def test_dense_bias(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} model = fc.LinearModel([price]) predictions = model(features) price_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) sess.run(price_var.assign([[10.]])) sess.run(bias.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions)) def test_sparse_bias(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast]) predictions = model(features) wire_cast_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(wire_cast_var)) sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions)) def test_dense_and_sparse_bias(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) price = fc.numeric_column('price') with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]} model = fc.LinearModel([wire_cast, price]) predictions = model(features) price_var, wire_cast_var, bias = model.variables with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) sess.run(price_var.assign([[10.]])) self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions)) def test_dense_and_sparse_column(self): """When the column is both dense and sparse, uses sparse tensors.""" class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn, fc.CategoricalColumn): @property def _is_v2_column(self): return True @property def name(self): return 'dense_and_sparse_column' @property def parse_example_spec(self): return {self.name: parsing_ops.VarLenFeature(self.dtype)} def transform_feature(self, transformation_cache, state_manager): return transformation_cache.get(self.name, state_manager) @property def variable_shape(self): raise ValueError('Should not use this method.') def get_dense_tensor(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') @property def num_buckets(self): return 4 def get_sparse_tensors(self, transformation_cache, state_manager): sp_tensor = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0], [1, 1]], values=[2, 0, 3], dense_shape=[2, 2]) return fc.CategoricalColumn.IdWeightPair(sp_tensor, None) dense_and_sparse_column = _DenseAndSparseColumn() with ops.Graph().as_default(): sp_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {dense_and_sparse_column.name: sp_tensor} model = fc.LinearModel([dense_and_sparse_column]) predictions = model(features) dense_and_sparse_column_var, bias = model.variables with _initialized_session() as sess: sess.run( dense_and_sparse_column_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [10015.]], 
self.evaluate(predictions)) def test_dense_multi_output(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} model = fc.LinearModel([price], units=3) predictions = model(features) price_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[10., 100., 1000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]], self.evaluate(predictions)) def test_sparse_multi_output(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast], units=3) predictions = model(features) wire_cast_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var)) sess.run( wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [1000., 1100., 1200.], [10000., 11000., 12000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]], self.evaluate(predictions)) def test_dense_multi_dimension(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} model = fc.LinearModel([price]) predictions = model(features) price_var, _ = model.variables with _initialized_session() as sess: self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], [650.]], self.evaluate(predictions)) def test_sparse_multi_rank(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = array_ops.sparse_placeholder(dtypes.string) wire_value = sparse_tensor.SparseTensorValue( values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2] indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]], dense_shape=[2, 2, 2]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast]) predictions = model(features) wire_cast_var, _ = model.variables with _initialized_session() as sess: self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var)) self.assertAllClose( np.zeros((2, 1)), predictions.eval(feed_dict={wire_tensor: wire_value})) sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) self.assertAllClose( [[1010.], [11000.]], predictions.eval(feed_dict={wire_tensor: wire_value})) def test_sparse_combiner(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast], sparse_combiner='mean') predictions = model(features) wire_cast_var, bias = model.variables with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions)) def test_sparse_combiner_sqrtn(self): wire_cast = 
fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast], sparse_combiner='sqrtn') predictions = model(features) wire_cast_var, bias = model.variables with _initialized_session() as sess: self.evaluate(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) self.evaluate(bias.assign([5.])) self.assertAllClose([[1005.], [7083.139]], self.evaluate(predictions)) def test_sparse_combiner_with_negative_weights(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights') with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = { 'wire_cast': wire_tensor, 'weights': constant_op.constant([[1., 1., -1.0]]) } model = fc.LinearModel([wire_cast_weights], sparse_combiner='sum') predictions = model(features) wire_cast_var, bias = model.variables with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions)) def test_dense_multi_dimension_multi_output(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} model = fc.LinearModel([price], units=3) predictions = model(features) price_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]])) sess.run(bias.assign([2., 3., 4.])) self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]], self.evaluate(predictions)) def test_raises_if_shape_mismatch(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} with self.assertRaisesRegexp( Exception, r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'): model = fc.LinearModel([price]) model(features) def test_dense_reshaping(self): price = fc.numeric_column('price', shape=[1, 2]) with ops.Graph().as_default(): features = {'price': [[[1., 2.]], [[5., 6.]]]} model = fc.LinearModel([price]) predictions = model(features) price_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], [650.]], self.evaluate(predictions)) def test_dense_multi_column(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} model = fc.LinearModel([price1, price2]) predictions = model(features) price1_var, price2_var, bias = model.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price1_var)) self.assertAllClose([[0.]], self.evaluate(price2_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) 
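# With the weights assigned below, each prediction is a per-column dot
# product plus the bias: 1*10 + 2*100 + 3*1000 + 7 = 3217 for the first row
# and 5*10 + 6*100 + 4*1000 + 7 = 4657 for the second.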
sess.run(price1_var.assign([[10.], [100.]])) sess.run(price2_var.assign([[1000.]])) sess.run(bias.assign([7.])) self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions)) def test_dense_trainable_default(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} model = fc.LinearModel([price]) model(features) price_var, bias = model.variables trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertIn(bias, trainable_vars) self.assertIn(price_var, trainable_vars) def test_sparse_trainable_default(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast]) model(features) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) wire_cast_var, bias = model.variables self.assertIn(bias, trainable_vars) self.assertIn(wire_cast_var, trainable_vars) def test_dense_trainable_false(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} model = fc.LinearModel([price], trainable=False) model(features) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_sparse_trainable_false(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} model = fc.LinearModel([wire_cast], trainable=False) model(features) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = fc.numeric_column('price_b') wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } model = fc.LinearModel([price_a, wire_cast, price_b]) model(features) my_vars = model.variables self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) with ops.Graph().as_default(): features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } model = fc.LinearModel([wire_cast, price_b, price_a]) model(features) my_vars = model.variables self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) def test_variable_names(self): price1 = fc.numeric_column('price1') dense_feature = fc.numeric_column('dense_feature') dense_feature_bucketized = fc.bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) all_cols = [price1, dense_feature_bucketized, some_embedding_column] with ops.Graph().as_default(): model = fc.LinearModel(all_cols) features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } model(features) for var in model.variables: self.assertIsInstance(var, variables_lib.VariableV1) variable_names = 
[var.name for var in model.variables] self.assertItemsEqual([ 'linear_model/dense_feature_bucketized/weights:0', 'linear_model/price1/weights:0', 'linear_model/sparse_feature_embedding/embedding_weights:0', 'linear_model/sparse_feature_embedding/weights:0', 'linear_model/bias_weights:0', ], variable_names) def test_fit_and_predict(self): columns = [fc.numeric_column('a')] model = fc.LinearModel(columns) model.compile( optimizer=rmsprop.RMSPropOptimizer(1e-3), loss='categorical_crossentropy', metrics=['accuracy']) x = {'a': np.random.random((10, 1))} y = np.random.randint(20, size=(10, 1)) y = keras.utils.to_categorical(y, num_classes=20) model.fit(x, y, epochs=1, batch_size=5) model.fit(x, y, epochs=1, batch_size=5) model.evaluate(x, y, batch_size=5) model.predict(x, batch_size=5) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string model = fc.LinearModel([price1, price2]) model(features) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string model = fc.LinearModel([price1, price2, price3]) model(features) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } model = fc.LinearModel([price1, price2]) predictions = model(features) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'must have the same size and shape'): sess.run( predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } model = fc.LinearModel([price1, price2]) predictions = model(features) with _initialized_session() as sess: sess.run( predictions, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) @test_util.run_deprecated_v1 def test_with_numpy_input_fn(self): price = fc.numeric_column('price') price_buckets = fc.bucketized_column( price, boundaries=[ 0., 10., 100., ]) body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) input_fn = numpy_io.numpy_input_fn( x={ 'price': np.array([-1., 2., 13., 104.]), 'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']), }, batch_size=2, shuffle=False) features = input_fn() model = fc.LinearModel([price_buckets, body_style]) net = model(features) # self.assertEqual(1 + 3 + 5, net.shape[1]) with _initialized_session() as sess: coord = 
coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess, coord=coord) body_style_var, price_buckets_var, bias = model.variables sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [100 - 10 + 5.]], self.evaluate(net)) coord.request_stop() coord.join(threads) @test_util.run_deprecated_v1 def test_with_1d_sparse_tensor(self): price = fc.numeric_column('price') price_buckets = fc.bucketized_column( price, boundaries=[ 0., 10., 100., ]) body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) # Provides 1-dim tensor and dense tensor. features = { 'price': constant_op.constant([ -1., 12., ]), 'body-style': sparse_tensor.SparseTensor( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)), } self.assertEqual(1, features['price'].shape.ndims) self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0]) model = fc.LinearModel([price_buckets, body_style]) net = model(features) with _initialized_session() as sess: body_style_var, price_buckets_var, bias = model.variables sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], self.evaluate(net)) @test_util.run_deprecated_v1 def test_with_1d_unknown_shape_sparse_tensor(self): price = fc.numeric_column('price') price_buckets = fc.bucketized_column( price, boundaries=[ 0., 10., 100., ]) body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) country = fc.categorical_column_with_vocabulary_list( 'country', vocabulary_list=['US', 'JP', 'CA']) # Provides 1-dim tensor and dense tensor. 
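# None of the placeholders below carries a static shape, so the model has
# to recover the batch dimension from the values fed at run time.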
features = { 'price': array_ops.placeholder(dtypes.float32), 'body-style': array_ops.sparse_placeholder(dtypes.string), 'country': array_ops.placeholder(dtypes.string), } self.assertIsNone(features['price'].shape.ndims) self.assertIsNone(features['body-style'].get_shape().ndims) price_data = np.array([-1., 12.]) body_style_data = sparse_tensor.SparseTensorValue( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)) country_data = np.array(['US', 'CA']) model = fc.LinearModel([price_buckets, body_style, country]) net = model(features) body_style_var, _, price_buckets_var, bias = model.variables with _initialized_session() as sess: sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run( net, feed_dict={ features['price']: price_data, features['body-style']: body_style_data, features['country']: country_data })) @test_util.run_deprecated_v1 def test_with_rank_0_feature(self): price = fc.numeric_column('price') features = { 'price': constant_op.constant(0), } self.assertEqual(0, features['price'].shape.ndims) # Static rank 0 should fail with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'): model = fc.LinearModel([price]) model(features) # Dynamic rank 0 should fail features = { 'price': array_ops.placeholder(dtypes.float32), } model = fc.LinearModel([price]) net = model(features) self.assertEqual(1, net.shape[1]) with _initialized_session() as sess: with self.assertRaisesOpError('Feature .* cannot have rank 0'): sess.run(net, feed_dict={features['price']: np.array(1)}) def test_multiple_linear_models(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features1 = {'price': [[1.], [5.]]} features2 = {'price': [[2.], [10.]]} model1 = fc.LinearModel([price]) model2 = fc.LinearModel([price]) predictions1 = model1(features1) predictions2 = model2(features2) price_var1, bias1 = model1.variables price_var2, bias2 = model2.variables with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias1)) sess.run(price_var1.assign([[10.]])) sess.run(bias1.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions1)) self.assertAllClose([0.], self.evaluate(bias2)) sess.run(price_var2.assign([[10.]])) sess.run(bias2.assign([5.])) self.assertAllClose([[25.], [105.]], self.evaluate(predictions2)) class OldLinearModelTest(test.TestCase): def test_raises_if_empty_feature_columns(self): with self.assertRaisesRegexp(ValueError, 'feature_columns must not be empty'): fc_old.linear_model(features={}, feature_columns=[]) def test_should_be_feature_column(self): with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'): fc_old.linear_model(features={'a': [[0]]}, feature_columns='NotSupported') def test_should_be_dense_or_categorical_column(self): class NotSupportedColumn(BaseFeatureColumnForTests, fc.FeatureColumn, fc_old._FeatureColumn): @property def _is_v2_column(self): return True @property def name(self): return 'NotSupportedColumn' def transform_feature(self, transformation_cache, state_manager): pass def _transform_feature(self, inputs): pass @property def parse_example_spec(self): pass @property def _parse_example_spec(self): pass with self.assertRaisesRegexp( ValueError, 'must be either a _DenseColumn or _CategoricalColumn'): fc_old.linear_model( features={'a': [[0]]}, feature_columns=[NotSupportedColumn()]) def 
test_does_not_support_dict_columns(self): with self.assertRaisesRegexp( ValueError, 'Expected feature_columns to be iterable, found dict.'): fc_old.linear_model( features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')}) def test_raises_if_duplicate_name(self): with self.assertRaisesRegexp( ValueError, 'Duplicate feature column name found for columns'): fc_old.linear_model( features={'a': [[0]]}, feature_columns=[fc.numeric_column('a'), fc.numeric_column('a')]) def test_dense_bias(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} predictions = fc_old.linear_model(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) sess.run(price_var.assign([[10.]])) sess.run(bias.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions)) def test_sparse_bias(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model(features, [wire_cast]) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(wire_cast_var)) sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions)) def test_dense_and_sparse_bias(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) price = fc.numeric_column('price') with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]} predictions = fc_old.linear_model(features, [wire_cast, price]) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) price_var = get_linear_model_column_var(price) with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) sess.run(price_var.assign([[10.]])) self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions)) def test_dense_and_sparse_column(self): """When the column is both dense and sparse, uses sparse tensors.""" class _DenseAndSparseColumn(BaseFeatureColumnForTests, fc.DenseColumn, fc.CategoricalColumn, fc_old._DenseColumn, fc_old._CategoricalColumn): @property def _is_v2_column(self): return True @property def name(self): return 'dense_and_sparse_column' @property def parse_example_spec(self): return {self.name: parsing_ops.VarLenFeature(self.dtype)} @property def _parse_example_spec(self): return self.parse_example_spec def transform_feature(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _transform_feature(self, inputs): return inputs.get(self.name) @property def variable_shape(self): raise ValueError('Should not use this method.') @property def _variable_shape(self): return self.variable_shape def get_dense_tensor(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _get_dense_tensor(self, inputs): raise
ValueError('Should not use this method.') @property def num_buckets(self): return 4 @property def _num_buckets(self): return self.num_buckets def get_sparse_tensors(self, transformation_cache, state_manager): raise ValueError('Should not use this method.') def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): sp_tensor = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0], [1, 1]], values=[2, 0, 3], dense_shape=[2, 2]) return fc.CategoricalColumn.IdWeightPair(sp_tensor, None) dense_and_sparse_column = _DenseAndSparseColumn() with ops.Graph().as_default(): sp_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {dense_and_sparse_column.name: sp_tensor} predictions = fc_old.linear_model(features, [dense_and_sparse_column]) bias = get_linear_model_bias() dense_and_sparse_column_var = get_linear_model_column_var( dense_and_sparse_column) with _initialized_session() as sess: sess.run( dense_and_sparse_column_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions)) def test_dense_multi_output(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} predictions = fc_old.linear_model(features, [price], units=3) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[10., 100., 1000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]], self.evaluate(predictions)) def test_sparse_multi_output(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model(features, [wire_cast], units=3) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var)) sess.run( wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.], [1000., 1100., 1200.], [10000., 11000., 12000.]])) sess.run(bias.assign([5., 6., 7.])) self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]], self.evaluate(predictions)) def test_dense_multi_dimension(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} predictions = fc_old.linear_model(features, [price]) price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], [650.]], self.evaluate(predictions)) def test_sparse_multi_rank(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = array_ops.sparse_placeholder(dtypes.string) wire_value = sparse_tensor.SparseTensorValue( values=['omar', 'stringer', 'marlo', 'omar'], # hashed = [2, 0, 3, 2] indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]], dense_shape=[2, 2, 2]) features = {'wire_cast': 
wire_tensor} predictions = fc_old.linear_model(features, [wire_cast]) wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var)) self.assertAllClose( np.zeros((2, 1)), predictions.eval(feed_dict={wire_tensor: wire_value})) sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) self.assertAllClose( [[1010.], [11000.]], predictions.eval(feed_dict={wire_tensor: wire_value})) def test_sparse_combiner(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = {'wire_cast': wire_tensor} predictions = fc_old.linear_model( features, [wire_cast], sparse_combiner='mean') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions)) def test_sparse_combiner_with_negative_weights(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) wire_cast_weights = fc.weighted_categorical_column(wire_cast, 'weights') with ops.Graph().as_default(): wire_tensor = sparse_tensor.SparseTensor( values=['omar', 'stringer', 'marlo'], # hashed to = [2, 0, 3] indices=[[0, 0], [1, 0], [1, 1]], dense_shape=[2, 2]) features = { 'wire_cast': wire_tensor, 'weights': constant_op.constant([[1., 1., -1.0]]) } predictions = fc_old.linear_model( features, [wire_cast_weights], sparse_combiner='sum') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) with _initialized_session() as sess: sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions)) def test_dense_multi_dimension_multi_output(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} predictions = fc_old.linear_model(features, [price], units=3) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose(np.zeros((3,)), self.evaluate(bias)) self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var)) sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]])) sess.run(bias.assign([2., 3., 4.])) self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]], self.evaluate(predictions)) def test_raises_if_shape_mismatch(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} with self.assertRaisesRegexp( Exception, r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'): fc_old.linear_model(features, [price]) def test_dense_reshaping(self): price = fc.numeric_column('price', shape=[1, 2]) with ops.Graph().as_default(): features = {'price': [[[1., 2.]], [[5., 6.]]]} predictions = fc_old.linear_model(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price_var.assign([[10.], [100.]])) self.assertAllClose([[210.], 
[650.]], self.evaluate(predictions)) def test_dense_multi_column(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} predictions = fc_old.linear_model(features, [price1, price2]) bias = get_linear_model_bias() price1_var = get_linear_model_column_var(price1) price2_var = get_linear_model_column_var(price2) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.], [0.]], self.evaluate(price1_var)) self.assertAllClose([[0.]], self.evaluate(price2_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price1_var.assign([[10.], [100.]])) sess.run(price2_var.assign([[1000.]])) sess.run(bias.assign([7.])) self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions)) def test_fills_cols_to_vars(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} cols_to_vars = {} fc_old.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars) bias = get_linear_model_bias() price1_var = get_linear_model_column_var(price1) price2_var = get_linear_model_column_var(price2) self.assertAllEqual(cols_to_vars['bias'], [bias]) self.assertAllEqual(cols_to_vars[price1], [price1_var]) self.assertAllEqual(cols_to_vars[price2], [price2_var]) def test_fills_cols_to_vars_partitioned_variables(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2', shape=3) with ops.Graph().as_default(): features = { 'price1': [[1., 2.], [6., 7.]], 'price2': [[3., 4., 5.], [8., 9., 10.]] } cols_to_vars = {} with variable_scope.variable_scope( 'linear', partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)): fc_old.linear_model( features, [price1, price2], cols_to_vars=cols_to_vars) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertEqual([0.], self.evaluate(cols_to_vars['bias'][0])) # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables. self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][0])) self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price1][1])) # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and # a [1, 1] Variable. self.assertAllEqual([[0.], [0.]], self.evaluate(cols_to_vars[price2][0])) self.assertAllEqual([[0.]], self.evaluate(cols_to_vars[price2][1])) def test_fills_cols_to_output_tensors(self): # Provide three _DenseColumn's to input_layer: a _NumericColumn, a # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn # creates a Variable. 
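# The cols_to_output_tensors dict passed below should end up mapping each
# column to the output tensor it contributed to the input layer.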
apple_numeric_column = fc.numeric_column('apple_numeric_column') banana_dense_feature = fc.numeric_column('banana_dense_feature') banana_dense_feature_bucketized = fc.bucketized_column( banana_dense_feature, boundaries=[0.]) cherry_sparse_column = fc.categorical_column_with_hash_bucket( 'cherry_sparse_feature', hash_bucket_size=5) dragonfruit_embedding_column = fc.embedding_column( cherry_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'apple_numeric_column': [[3.], [4.]], 'banana_dense_feature': [[-1.], [4.]], 'cherry_sparse_feature': [['a'], ['x']], } cols_to_output_tensors = {} all_cols = [ apple_numeric_column, banana_dense_feature_bucketized, dragonfruit_embedding_column ] input_layer = fc_old.input_layer( features, all_cols, cols_to_output_tensors=cols_to_output_tensors) # We check the mapping by checking that we have the right keys, # and that the values (output_tensors) were indeed the ones used to # form the input layer. self.assertItemsEqual(all_cols, cols_to_output_tensors.keys()) input_layer_inputs = [tensor for tensor in input_layer.op.inputs[:-1]] output_tensors = [tensor for tensor in cols_to_output_tensors.values()] self.assertItemsEqual(input_layer_inputs, output_tensors) def test_dense_collection(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) self.assertIn(bias, my_vars) self.assertIn(price_var, my_vars) def test_sparse_collection(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) self.assertIn(bias, my_vars) self.assertIn(wire_cast_var, my_vars) def test_dense_trainable_default(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertIn(bias, trainable_vars) self.assertIn(price_var, trainable_vars) def test_sparse_trainable_default(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast]) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) bias = get_linear_model_bias() wire_cast_var = get_linear_model_column_var(wire_cast) self.assertIn(bias, trainable_vars) self.assertIn(wire_cast_var, trainable_vars) def test_dense_trainable_false(self): price = fc.numeric_column('price') with ops.Graph().as_default() as g: features = {'price': [[1.], [5.]]} fc_old.linear_model(features, [price], trainable=False) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_sparse_trainable_false(self): wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with 
ops.Graph().as_default() as g: wire_tensor = sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) features = {'wire_cast': wire_tensor} fc_old.linear_model(features, [wire_cast], trainable=False) trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual([], trainable_vars) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = fc.numeric_column('price_b') wire_cast = fc.categorical_column_with_hash_bucket('wire_cast', 4) with ops.Graph().as_default() as g: features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } fc_old.linear_model( features, [price_a, wire_cast, price_b], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) with ops.Graph().as_default() as g: features = { 'price_a': [[1.]], 'price_b': [[3.]], 'wire_cast': sparse_tensor.SparseTensor( values=['omar'], indices=[[0, 0]], dense_shape=[1, 1]) } fc_old.linear_model( features, [wire_cast, price_b, price_a], weight_collections=['my-vars']) my_vars = g.get_collection('my-vars') self.assertIn('price_a', my_vars[0].name) self.assertIn('price_b', my_vars[1].name) self.assertIn('wire_cast', my_vars[2].name) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.linear_model(features, [price1, price2]) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.linear_model(features, [price1, price2, price3]) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } predictions = fc_old.linear_model(features, [price1, price2]) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'must have the same size and shape'): sess.run( predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } predictions = fc_old.linear_model(features, [price1, price2]) with _initialized_session() as sess: sess.run( predictions, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) @test_util.run_deprecated_v1 def test_with_1d_sparse_tensor(self): price = 
fc.numeric_column('price') price_buckets = fc.bucketized_column( price, boundaries=[ 0., 10., 100., ]) body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) # Provides 1-dim tensor and dense tensor. features = { 'price': constant_op.constant([ -1., 12., ]), 'body-style': sparse_tensor.SparseTensor( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)), } self.assertEqual(1, features['price'].shape.ndims) self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0]) net = fc_old.linear_model(features, [price_buckets, body_style]) with _initialized_session() as sess: bias = get_linear_model_bias() price_buckets_var = get_linear_model_column_var(price_buckets) body_style_var = get_linear_model_column_var(body_style) sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], self.evaluate(net)) @test_util.run_deprecated_v1 def test_with_1d_unknown_shape_sparse_tensor(self): price = fc.numeric_column('price') price_buckets = fc.bucketized_column( price, boundaries=[ 0., 10., 100., ]) body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) country = fc.categorical_column_with_vocabulary_list( 'country', vocabulary_list=['US', 'JP', 'CA']) # Provides 1-dim tensor and dense tensor. features = { 'price': array_ops.placeholder(dtypes.float32), 'body-style': array_ops.sparse_placeholder(dtypes.string), 'country': array_ops.placeholder(dtypes.string), } self.assertIsNone(features['price'].shape.ndims) self.assertIsNone(features['body-style'].get_shape().ndims) price_data = np.array([-1., 12.]) body_style_data = sparse_tensor.SparseTensorValue( indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,)) country_data = np.array(['US', 'CA']) net = fc_old.linear_model(features, [price_buckets, body_style, country]) bias = get_linear_model_bias() price_buckets_var = get_linear_model_column_var(price_buckets) body_style_var = get_linear_model_column_var(body_style) with _initialized_session() as sess: sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]])) sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]])) sess.run(bias.assign([5.])) self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]], sess.run( net, feed_dict={ features['price']: price_data, features['body-style']: body_style_data, features['country']: country_data })) @test_util.run_deprecated_v1 def test_with_rank_0_feature(self): price = fc.numeric_column('price') features = { 'price': constant_op.constant(0), } self.assertEqual(0, features['price'].shape.ndims) # Static rank 0 should fail with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'): fc_old.linear_model(features, [price]) # Dynamic rank 0 should fail features = { 'price': array_ops.placeholder(dtypes.float32), } net = fc_old.linear_model(features, [price]) self.assertEqual(1, net.shape[1]) with _initialized_session() as sess: with self.assertRaisesOpError('Feature .* cannot have rank 0'): sess.run(net, feed_dict={features['price']: np.array(1)}) def test_multiple_linear_models(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features1 = {'price': [[1.], [5.]]} features2 = {'price': [[2.], [10.]]} predictions1 = fc_old.linear_model(features1, [price]) predictions2 = fc_old.linear_model(features2, [price]) 
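# Each call to fc_old.linear_model builds its variables under a fresh scope
# ('linear_model', then 'linear_model_1'), which the getters below rely on:
#   linear_model/price/weights:0     (first model)
#   linear_model_1/price/weights:0   (second model)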
bias1 = get_linear_model_bias(name='linear_model') bias2 = get_linear_model_bias(name='linear_model_1') price_var1 = get_linear_model_column_var(price, name='linear_model') price_var2 = get_linear_model_column_var(price, name='linear_model_1') with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias1)) sess.run(price_var1.assign([[10.]])) sess.run(bias1.assign([5.])) self.assertAllClose([[15.], [55.]], self.evaluate(predictions1)) self.assertAllClose([0.], self.evaluate(bias2)) sess.run(price_var2.assign([[10.]])) sess.run(bias2.assign([5.])) self.assertAllClose([[25.], [105.]], self.evaluate(predictions2)) @test_util.run_deprecated_v1 def test_linear_model_v1_shared_embedding_all_other_v2(self): price = fc.numeric_column('price') # v2 some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) # v2 some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) # v2 categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) # v2 categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) # v2 shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=2) # v1 all_cols = [ price, some_embedding_column, shared_embedding_a, shared_embedding_b ] with ops.Graph().as_default(): features = { 'price': [[3.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc_old.linear_model(features, all_cols) bias = get_linear_model_bias() self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([0.], self.evaluate(bias)) @test_util.run_deprecated_v1 def test_linear_model_v1_shared_embedding_with_v2_cat_all_other_v2(self): price = fc.numeric_column('price') # v2 some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) # v2 some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) # v2 categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) # v2 categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) # v2 shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=2) # v1 all_cols = [ price, some_embedding_column, shared_embedding_a, shared_embedding_b ] with ops.Graph().as_default(): features = { 'price': [[3.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc_old.linear_model(features, all_cols) bias = get_linear_model_bias() self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([0.], self.evaluate(bias)) @test_util.run_deprecated_v1 def test_linear_model_v1_v2_mix(self): price = fc.numeric_column('price') # v2 some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) # v1 some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) # v1 categorical_column_a = fc.categorical_column_with_identity( key='aaa', 
num_buckets=3) # v2 categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) # v2 shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=2) # v1 all_cols = [ price, some_embedding_column, shared_embedding_a, shared_embedding_b ] with ops.Graph().as_default(): features = { 'price': [[3.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc_old.linear_model(features, all_cols) bias = get_linear_model_bias() self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([0.], self.evaluate(bias)) @test_util.run_deprecated_v1 def test_linear_model_v2_shared_embedding_all_other_v1(self): price = fc.numeric_column('price') # v1 some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) # v1 some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) # v1 categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) # v2 categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) # v2 shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=2) # v2 all_cols = [ price, some_embedding_column, shared_embedding_a, shared_embedding_b ] with ops.Graph().as_default(): features = { 'price': [[3.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } with self.assertRaisesRegexp(ValueError, 'SharedEmbeddingColumns are not supported'): fc_old.linear_model(features, all_cols) class DenseFeaturesTest(test.TestCase): @test_util.run_in_graph_and_eager_modes() def test_retrieving_input(self): features = {'a': [0.]} dense_features = fc.DenseFeatures(fc.numeric_column('a')) inputs = self.evaluate(dense_features(features)) self.assertAllClose([[0.]], inputs) def test_reuses_variables(self): with context.eager_mode(): sparse_input = sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3)) # Create feature columns (categorical and embedding). categorical_column = fc.categorical_column_with_identity( key='a', num_buckets=3) embedding_dimension = 2 def _embedding_column_initializer(shape, dtype, partition_info): del shape # unused del dtype # unused del partition_info # unused embedding_values = ( (1, 0), # id 0 (0, 1), # id 1 (1, 1)) # id 2 return embedding_values embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_embedding_column_initializer) dense_features = fc.DenseFeatures([embedding_column]) features = {'a': sparse_input} inputs = dense_features(features) variables = dense_features.variables # Sanity check: test that the inputs are correct. self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs) # Check that only one variable was created. 
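# Only the embedding column creates state here, so exactly one variable is
# expected.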
self.assertEqual(1, len(variables)) # Check that invoking dense_features on the same features does not create # additional variables _ = dense_features(features) self.assertEqual(1, len(variables)) self.assertEqual(variables[0], dense_features.variables[0]) def test_feature_column_dense_features_gradient(self): with context.eager_mode(): sparse_input = sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3)) # Create feature columns (categorical and embedding). categorical_column = fc.categorical_column_with_identity( key='a', num_buckets=3) embedding_dimension = 2 def _embedding_column_initializer(shape, dtype, partition_info): del shape # unused del dtype # unused del partition_info # unused embedding_values = ( (1, 0), # id 0 (0, 1), # id 1 (1, 1)) # id 2 return embedding_values embedding_column = fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_embedding_column_initializer) dense_features = fc.DenseFeatures([embedding_column]) features = {'a': sparse_input} def scale_matrix(): matrix = dense_features(features) return 2 * matrix # Sanity check: Verify that scale_matrix returns the correct output. self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix()) # Check that the returned gradient is correct. grad_function = backprop.implicit_grad(scale_matrix) grads_and_vars = grad_function() indexed_slice = grads_and_vars[0][0] gradient = grads_and_vars[0][0].values self.assertAllEqual([0, 1, 2], indexed_slice.indices) self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient) def test_raises_if_empty_feature_columns(self): with self.assertRaisesRegexp(ValueError, 'feature_columns must not be empty'): fc.DenseFeatures(feature_columns=[])(features={}) def test_should_be_dense_column(self): with self.assertRaisesRegexp(ValueError, 'must be a .*DenseColumn'): fc.DenseFeatures(feature_columns=[ fc.categorical_column_with_hash_bucket('wire_cast', 4) ])( features={ 'a': [[0]] }) def test_does_not_support_dict_columns(self): with self.assertRaisesRegexp( ValueError, 'Expected feature_columns to be iterable, found dict.'): fc.DenseFeatures(feature_columns={'a': fc.numeric_column('a')})( features={ 'a': [[0]] }) def test_bare_column(self): with ops.Graph().as_default(): features = {'a': [0.]} net = fc.DenseFeatures(fc.numeric_column('a'))(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[0.]], self.evaluate(net)) def test_column_generator(self): with ops.Graph().as_default(): features = {'a': [0.], 'b': [1.]} columns = (fc.numeric_column(key) for key in features) net = fc.DenseFeatures(columns)(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[0., 1.]], self.evaluate(net)) def test_raises_if_duplicate_name(self): with self.assertRaisesRegexp( ValueError, 'Duplicate feature column name found for columns'): fc.DenseFeatures( feature_columns=[fc.numeric_column('a'), fc.numeric_column('a')])( features={ 'a': [[0]] }) def test_one_column(self): price = fc.numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} net = fc.DenseFeatures([price])(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1.], [5.]], self.evaluate(net)) def test_multi_dimension(self): price = fc.numeric_column('price', shape=2) with
ops.Graph().as_default(): features = {'price': [[1., 2.], [5., 6.]]} net = fc.DenseFeatures([price])(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net)) def test_compute_output_shape(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2', shape=4) with ops.Graph().as_default(): features = { 'price1': [[1., 2.], [5., 6.]], 'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]] } dense_features = fc.DenseFeatures([price1, price2]) self.assertEqual((None, 6), dense_features.compute_output_shape((None,))) net = dense_features(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]], self.evaluate(net)) def test_raises_if_shape_mismatch(self): price = fc.numeric_column('price', shape=2) with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} with self.assertRaisesRegexp( Exception, r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'): fc.DenseFeatures([price])(features) def test_reshaping(self): price = fc.numeric_column('price', shape=[1, 2]) with ops.Graph().as_default(): features = {'price': [[[1., 2.]], [[5., 6.]]]} net = fc.DenseFeatures([price])(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net)) def test_multi_column(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} net = fc.DenseFeatures([price1, price2])(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net)) def test_cols_to_output_tensors(self): price1 = fc.numeric_column('price1', shape=2) price2 = fc.numeric_column('price2') with ops.Graph().as_default(): cols_dict = {} features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]} dense_features = fc.DenseFeatures([price1, price2]) net = dense_features(features, cols_dict) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(cols_dict[price1])) self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2])) self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net)) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = fc.numeric_column('price_b') with ops.Graph().as_default(): features = { 'price_a': [[1.]], 'price_b': [[3.]], } net1 = fc.DenseFeatures([price_a, price_b])(features) net2 = fc.DenseFeatures([price_b, price_a])(features) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 3.]], self.evaluate(net1)) self.assertAllClose([[1., 3.]], self.evaluate(net2)) def test_fails_for_categorical_column(self): animal = fc.categorical_column_with_identity('animal', num_buckets=4) with ops.Graph().as_default(): features = { 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]) } with self.assertRaisesRegexp(Exception, 'must be a .*DenseColumn'): fc.DenseFeatures([animal])(features) def 
test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc.DenseFeatures([price1, price2])(features) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc.DenseFeatures([price1, price2, price3])(features) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } net = fc.DenseFeatures([price1, price2])(features) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'Dimensions of inputs should match'): sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } net = fc.DenseFeatures([price1, price2])(features) with _initialized_session() as sess: sess.run( net, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) def test_multiple_layers_with_same_embedding_column(self): some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'sparse_feature': [['a'], ['x']], } all_cols = [some_embedding_column] fc.DenseFeatures(all_cols)(features) fc.DenseFeatures(all_cols)(features) # Make sure that 2 variables get created in this case. 
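      # Note: fc.DenseFeatures layers are stateful, so each call above builds
      # its own layer instance and hence its own embedding table, even though
      # both wrap the same embedding_column. A rough sketch of the sharing
      # alternative (which the next test exercises), with hypothetical
      # categorical columns cat_a and cat_b:
      #   shared_a, shared_b = fc.shared_embedding_columns_v2(
      #       [cat_a, cat_b], dimension=10)
      # where both resulting columns read from a single shared variable.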
self.assertEqual(2, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) expected_var_names = [ 'dense_features/sparse_feature_embedding/embedding_weights:0', 'dense_features_1/sparse_feature_embedding/embedding_weights:0' ] self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) @test_util.run_deprecated_v1 def test_multiple_layers_with_same_shared_embedding_column(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) with ops.Graph().as_default(): features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } all_cols = [embedding_column_a, embedding_column_b] fc.DenseFeatures(all_cols)(features) fc.DenseFeatures(all_cols)(features) # Make sure that only 1 variable gets created in this case. self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) self.assertItemsEqual( ['aaa_bbb_shared_embedding:0'], [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) @test_util.run_deprecated_v1 def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) all_cols = [embedding_column_a, embedding_column_b] with ops.Graph().as_default(): features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc.DenseFeatures(all_cols)(features) # Make sure that only 1 variable gets created in this case. self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) with ops.Graph().as_default(): features1 = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } fc.DenseFeatures(all_cols)(features1) # Make sure that only 1 variable gets created in this case. self.assertEqual(1, len( ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))) self.assertItemsEqual( ['aaa_bbb_shared_embedding:0'], [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) @test_util.run_deprecated_v1 def test_with_numpy_input_fn(self): embedding_values = ( (1., 2., 3., 4., 5.), # id 0 (6., 7., 8., 9., 10.), # id 1 (11., 12., 13., 14., 15.) # id 2 ) def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info return embedding_values # price has 1 dimension in dense_features price = fc.numeric_column('price') body_style = fc.categorical_column_with_vocabulary_list( 'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan']) # one_hot_body_style has 3 dims in dense_features. 
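    # indicator_column one-hot encodes ids into a dense vector whose width
    # equals the vocabulary size, so with the 3-item vocabulary above,
    # 'hardtop' (index 0) becomes [1., 0., 0.] and 'sedan' (index 2) becomes
    # [0., 0., 1.], matching the expected rows asserted further down.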
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_body_style has 5 dims in dense_features.
    embedded_body_style = fc.embedding_column(
        body_style, dimension=5, initializer=_initializer)

    input_fn = numpy_io.numpy_input_fn(
        x={
            'price': np.array([11., 12., 13., 14.]),
            'body-style': np.array(['sedan', 'hardtop', 'wagon', 'sedan']),
        },
        batch_size=2,
        shuffle=False)
    features = input_fn()
    net = fc.DenseFeatures([price, one_hot_body_style, embedded_body_style])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess, coord=coord)

      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual([[11., 12., 13., 14., 15., 0., 0., 1., 11.],
                           [1., 2., 3., 4., 5., 1., 0., 0., 12.]],
                          sess.run(net))

      coord.request_stop()
      coord.join(threads)

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_country has 5 dims in dense_features.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                11.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country':
            constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_country has 2 dims in dense_features.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
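    # Rank-1 inputs are treated as a batch of scalars: the layer expands them
    # to rank 2, so the (2,)-shaped 'price' below contributes a (2, 1) slice
    # to the output, while the ndims assertions confirm the raw inputs really
    # are 1-d before that expansion happens.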
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = fc.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    # price has 1 dimension in dense_features
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.DenseFeatures([price])(features)

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.DenseFeatures([price])(features)
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})


class InputLayerTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def test_retrieving_input(self):
    features = {'a': [0.]}
    input_layer = fc_old.InputLayer(fc.numeric_column('a'))
    inputs = self.evaluate(input_layer(features))
    self.assertAllClose([[0.]], inputs)

  def test_reuses_variables(self):
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = fc_old.InputLayer([embedding_column])
      features = {'a': sparse_input}

      inputs = input_layer(features)
      variables = input_layer.variables

      # Sanity check: test that the inputs are correct.
      self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

      # Check that only one variable was created.
      self.assertEqual(1, len(variables))

      # Check that invoking input_layer on the same features does not create
      # additional variables
      _ = input_layer(features)
      self.assertEqual(1, len(variables))
      self.assertEqual(variables[0], input_layer.variables[0])

  def test_feature_column_input_layer_gradient(self):
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
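      # The deterministic initializer below pins the embedding table, which
      # makes the gradient easy to verify by hand: scale_matrix returns
      # 2 * lookup, so each looked-up row of the table receives a gradient of
      # d(2w)/dw = 2 per output dimension -- exactly the
      # [[2, 2], [2, 2], [2, 2]] value asserted at the end of this test.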
      categorical_column = fc.categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = fc_old.InputLayer([embedding_column])
      features = {'a': sparse_input}

      def scale_matrix():
        matrix = input_layer(features)
        return 2 * matrix

      # Sanity check: Verify that scale_matrix returns the correct output.
      self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

      # Check that the returned gradient is correct.
      grad_function = backprop.implicit_grad(scale_matrix)
      grads_and_vars = grad_function()
      indexed_slice = grads_and_vars[0][0]
      gradient = grads_and_vars[0][0].values

      self.assertAllEqual([0, 1, 2], indexed_slice.indices)
      self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)


class FunctionalInputLayerTest(test.TestCase):

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc_old.input_layer(features={}, feature_columns=[])

  def test_should_be_dense_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
      fc_old.input_layer(
          features={'a': [[0]]},
          feature_columns=[
              fc.categorical_column_with_hash_bucket('wire_cast', 4)
          ])

  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc_old.input_layer(
          features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})

  def test_bare_column(self):
    with ops.Graph().as_default():
      features = {'a': [0.]}
      net = fc_old.input_layer(features, fc.numeric_column('a'))

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0.]], self.evaluate(net))

  def test_column_generator(self):
    with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
      columns = (fc.numeric_column(key) for key in features)
      net = fc_old.input_layer(features, columns)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[0., 1.]], self.evaluate(net))

  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc_old.input_layer(
          features={'a': [[0]]},
          feature_columns=[fc.numeric_column('a'),
                           fc.numeric_column('a')])

  def test_one_column(self):
    price = fc.numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    price = fc.numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc_old.input_layer(features, [price])

  def test_reshaping(self):
    price = fc.numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = fc_old.input_layer(features, [price])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_multi_column(self):
    price1 = fc.numeric_column('price1', shape=2)
    price2 = fc.numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = fc_old.input_layer(features, [price1, price2])

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_fills_cols_to_vars(self):
    # Provide three _DenseColumns to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
    # creates a Variable.
    price1 = fc.numeric_column('price1')
    dense_feature = fc.numeric_column('dense_feature')
    dense_feature_bucketized = fc.bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc.embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertIsInstance(cols_to_vars[some_embedding_column][0],
                            variables_lib.VariableV1)
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape,
                          [5, 10])

  @test_util.run_deprecated_v1
  def test_fills_cols_to_vars_shared_embedding(self):
    # Provide five DenseColumns to input_layer: a NumericColumn, a
    # BucketizedColumn, an EmbeddingColumn, and two SharedEmbeddingColumns.
    # The EmbeddingColumn creates a Variable and the two
    # SharedEmbeddingColumns share one variable.
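    # Shapes this test pins down (using the real columns defined below): the
    # numeric and bucketized columns map to empty variable lists, the
    # embedding column to one [5, 10] variable (hash_bucket_size x
    # dimension), and the shared pair to a single [3, 2] variable between
    # them.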
price1 = fc.numeric_column('price1') dense_feature = fc.numeric_column('dense_feature') dense_feature_bucketized = fc.bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=2) with ops.Graph().as_default(): features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } cols_to_vars = {} all_cols = [ price1, dense_feature_bucketized, some_embedding_column, shared_embedding_a, shared_embedding_b ] fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertItemsEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(1, len(cols_to_vars[some_embedding_column])) self.assertEqual(1, len(cols_to_vars[shared_embedding_a])) # This is a bug in the current implementation and should be fixed in the # new one. self.assertEqual(0, len(cols_to_vars[shared_embedding_b])) self.assertIsInstance(cols_to_vars[some_embedding_column][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10]) self.assertIsInstance(cols_to_vars[shared_embedding_a][0], variables_lib.Variable) self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2]) def test_fills_cols_to_vars_partitioned_variables(self): price1 = fc.numeric_column('price1') dense_feature = fc.numeric_column('dense_feature') dense_feature_bucketized = fc.bucketized_column( dense_feature, boundaries=[0.]) some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'price1': [[3.], [4.]], 'dense_feature': [[-1.], [4.]], 'sparse_feature': [['a'], ['x']], } cols_to_vars = {} all_cols = [price1, dense_feature_bucketized, some_embedding_column] with variable_scope.variable_scope( 'input_from_feature_columns', partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)): fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars) self.assertItemsEqual(list(cols_to_vars.keys()), all_cols) self.assertEqual(0, len(cols_to_vars[price1])) self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized])) self.assertEqual(3, len(cols_to_vars[some_embedding_column])) self.assertEqual( 'input_from_feature_columns/input_layer/sparse_feature_embedding/' 'embedding_weights/part_0:0', cols_to_vars[some_embedding_column][0].name) self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10]) self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10]) def test_column_order(self): price_a = fc.numeric_column('price_a') price_b = fc.numeric_column('price_b') with ops.Graph().as_default(): features = { 'price_a': 
[[1.]], 'price_b': [[3.]], } net1 = fc_old.input_layer(features, [price_a, price_b]) net2 = fc_old.input_layer(features, [price_b, price_a]) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose([[1., 3.]], self.evaluate(net1)) self.assertAllClose([[1., 3.]], self.evaluate(net2)) def test_fails_for_categorical_column(self): animal = fc.categorical_column_with_identity('animal', num_buckets=4) with ops.Graph().as_default(): features = { 'animal': sparse_tensor.SparseTensor( indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2]) } with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'): fc_old.input_layer(features, [animal]) def test_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': [[1.], [5.], [7.]], # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.input_layer(features, [price1, price2]) def test_subset_of_static_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') price3 = fc.numeric_column('price3') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]], # batchsize = 2 'price3': [[3.], [4.], [5.]] # batchsize = 3 } with self.assertRaisesRegexp( ValueError, r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string fc_old.input_layer(features, [price1, price2, price3]) def test_runtime_batch_size_mismatch(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3 'price2': [[3.], [4.]] # batchsize = 2 } net = fc_old.input_layer(features, [price1, price2]) with _initialized_session() as sess: with self.assertRaisesRegexp(errors.OpError, 'Dimensions of inputs should match'): sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]}) def test_runtime_batch_size_matches(self): price1 = fc.numeric_column('price1') price2 = fc.numeric_column('price2') with ops.Graph().as_default(): features = { 'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2 } net = fc_old.input_layer(features, [price1, price2]) with _initialized_session() as sess: sess.run( net, feed_dict={ features['price1']: [[1.], [5.]], features['price2']: [[1.], [5.]], }) def test_multiple_layers_with_same_embedding_column(self): some_sparse_column = fc.categorical_column_with_hash_bucket( 'sparse_feature', hash_bucket_size=5) some_embedding_column = fc.embedding_column( some_sparse_column, dimension=10) with ops.Graph().as_default(): features = { 'sparse_feature': [['a'], ['x']], } all_cols = [some_embedding_column] fc_old.input_layer(features, all_cols) fc_old.input_layer(features, all_cols) # Make sure that 2 variables get created in this case. 
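      # Each fc_old.input_layer call builds its variables under a fresh
      # 'input_layer' variable scope, so the second call gets the uniquified
      # 'input_layer_1' prefix seen in the expected names below; plain
      # embedding columns do not share state across calls.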
      self.assertEqual(2,
                       len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'input_layer/sparse_feature_embedding/embedding_weights:0',
          'input_layer_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_country has 5 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                11.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country':
            constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = fc_old.input_layer(features,
                             [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')

    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc.indicator_column(body_style)

    # embedded_country has 2 dims in input_layer.
    country = fc.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc.embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
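        # (A plain placeholder suffices for 'country' because categorical
        # columns convert dense input tensors to SparseTensors internally;
        # only 'body-style' needs an explicit sparse_placeholder so that a
        # SparseTensorValue can be fed in.)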
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = fc_old.input_layer(features,
                             [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    # price has 1 dimension in input_layer
    price = fc.numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc_old.input_layer(features, [price])

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc_old.input_layer(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})


class MakeParseExampleSpecTest(test.TestCase):

  class _TestFeatureColumn(BaseFeatureColumnForTests,
                           collections.namedtuple('_TestFeatureColumn',
                                                  ('parse_spec'))):

    @property
    def _is_v2_column(self):
      return True

    @property
    def name(self):
      return '_TestFeatureColumn'

    def transform_feature(self, transformation_cache, state_manager):
      pass

    def _transform_feature(self, inputs):
      pass

    @property
    def parse_example_spec(self):
      return self.parse_spec

    @property
    def _parse_example_spec(self):
      return self.parse_spec

  def test_no_feature_columns(self):
    actual = fc.make_parse_example_spec_v2([])
    self.assertDictEqual({}, actual)

  def test_invalid_type(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    with self.assertRaisesRegexp(
        ValueError,
        'All feature_columns must be FeatureColumn instances.*invalid_column'):
      fc.make_parse_example_spec_v2((self._TestFeatureColumn({
          key1: parse_spec1
      }), 'invalid_column'))

  def test_one_feature_column(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }),))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_two_feature_columns(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key2: parse_spec2
    })))
    self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)

  def test_equal_keys_different_parse_spec(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
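    # make_parse_example_spec_v2 merges the per-column parsing dicts keyed by
    # feature name, and a key may repeat only when every column supplies an
    # equal spec. Below, 'key1' gets a FixedLenFeature from one column and a
    # VarLenFeature from the other, so the merge must raise.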
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    with self.assertRaisesRegexp(
        ValueError,
        'feature_columns contain different parse_spec for key key1'):
      fc.make_parse_example_spec_v2((self._TestFeatureColumn({
          key1: parse_spec1
      }), self._TestFeatureColumn({
          key1: parse_spec2
      })))

  def test_equal_keys_equal_parse_spec(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key1: parse_spec1
    })))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_multiple_features_dict(self):
    """parse_spec for one column is a dict with length > 1."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    key3 = 'key3'
    parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
    actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
        key1: parse_spec1
    }), self._TestFeatureColumn({
        key2: parse_spec2,
        key3: parse_spec3
    })))
    self.assertDictEqual({
        key1: parse_spec1,
        key2: parse_spec2,
        key3: parse_spec3
    }, actual)


def _assert_sparse_tensor_value(test_case, expected, actual):
  test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
  test_case.assertAllEqual(expected.indices, actual.indices)

  test_case.assertEqual(
      np.array(expected.values).dtype, np.array(actual.values).dtype)
  test_case.assertAllEqual(expected.values, actual.values)

  test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
  test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)


class VocabularyFileCategoricalColumnTest(test.TestCase):

  def setUp(self):
    super(VocabularyFileCategoricalColumnTest, self).setUp()

    # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
    self._warriors_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/warriors_vocabulary.txt')
    self._warriors_vocabulary_size = 5

    # Contains strings, character names from 'The Wire': omar, stringer, marlo
    self._wire_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/wire_vocabulary.txt')
    self._wire_vocabulary_size = 3

  @test_util.run_deprecated_v1
  def test_defaults(self):
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column.parse_example_spec)
    self.assertTrue(column._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_vocabulary_file(
          key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file='path_to_file',
        vocabulary_size=3,
        num_oov_buckets=4,
        dtype=dtypes.int32)
    self.assertEqual(7, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file='path_to_file',
        vocabulary_size=3,
        num_oov_buckets=4,
        dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa',
column.name) self.assertEqual(7, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, column.parse_example_spec) def test_vocabulary_file_none(self): with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=None, vocabulary_size=3) def test_vocabulary_file_empty_string(self): with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='', vocabulary_size=3) @test_util.run_deprecated_v1 def test_invalid_vocabulary_file(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'): self.evaluate(lookup_ops.tables_initializer()) def test_invalid_vocabulary_size(self): with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=-1) with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=0) @test_util.run_deprecated_v1 def test_too_large_vocabulary_size(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size + 1) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'): self.evaluate(lookup_ops.tables_initializer()) def test_invalid_num_oov_buckets(self): with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path', vocabulary_size=3, num_oov_buckets=-1) def test_invalid_dtype(self): with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path', vocabulary_size=3, dtype=dtypes.float64) def test_invalid_buckets_and_default_value(self): with self.assertRaisesRegexp(ValueError, 'both num_oov_buckets and default_value'): fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=100, default_value=2) def test_invalid_input_dtype_int32(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, dtype=dtypes.string) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(12, 24, 36), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) def test_invalid_input_dtype_string(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, 
dtype=dtypes.int32) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file='path_to_file', vocabulary_size=3) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_get_sparse_tensors(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_none_vocabulary_size(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_transform_feature(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_tensor = fc._transform_features_v2({ 'aaa': inputs }, [column], None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_dense_input(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, 
        vocabulary_size=self._wire_vocabulary_size)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=np.array((2, -1, 0), dtype=np.int64),
            dense_shape=(2, 2)), self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 2, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_oov_buckets(self):
    column = fc.categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 33, 0, 62), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to one less than the number of entries in the file,
    # we take 'marlo' out of the vocabulary.
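    # With neither num_oov_buckets nor default_value set, lookups that miss
    # the (truncated) vocabulary fall back to the default id of -1, which is
    # why the ids asserted below are (-1, -1, 0): 'marlo' and 'skywalker'
    # both miss, while 'omar' still maps to 0.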
column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size - 1) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((-1, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=(11, 100, 30, 22), dense_shape=(3, 3)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0, 4), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32_dense_input(self): default_value = -100 column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32, default_value=default_value) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22)) }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=np.array((2, default_value, 0, 4), dtype=np.int64), dense_shape=(3, 3)), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_int32_with_oov_buckets(self): column = fc.categorical_column_with_vocabulary_file( key='aaa', vocabulary_file=self._warriors_vocabulary_file_name, vocabulary_size=self._warriors_vocabulary_size, dtype=dtypes.int32, num_oov_buckets=100) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1), (2, 2)), values=(11, 100, 30, 22), dense_shape=(3, 3)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, 60, 0, 4), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_linear_model(self): wire_column = fc.categorical_column_with_vocabulary_file( key='wire', vocabulary_file=self._wire_vocabulary_file_name, 
vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=1) self.assertEqual(4, wire_column.num_buckets) with ops.Graph().as_default(): model = fc.LinearModel((wire_column,)) predictions = model({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }) wire_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 2: wire_var[2] = 3 # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5 self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions)) def test_old_linear_model(self): wire_column = fc.categorical_column_with_vocabulary_file( key='wire', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=1) self.assertEqual(4, wire_column.num_buckets) with ops.Graph().as_default(): predictions = fc_old.linear_model({ wire_column.name: sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) }, (wire_column,)) bias = get_linear_model_bias() wire_var = get_linear_model_column_var(wire_column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,), (0.,)), self.evaluate(wire_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,)))) # 'marlo' -> 2: wire_var[2] = 3 # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5 self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_serialization(self): wire_column = fc.categorical_column_with_vocabulary_file( key='wire', vocabulary_file=self._wire_vocabulary_file_name, vocabulary_size=self._wire_vocabulary_size, num_oov_buckets=1) self.assertEqual(['wire'], wire_column.parents) config = wire_column._get_config() self.assertEqual({ 'default_value': -1, 'dtype': 'string', 'key': 'wire', 'num_oov_buckets': 1, 'vocabulary_file': self._wire_vocabulary_file_name, 'vocabulary_size': 3 }, config) self.assertEqual(wire_column, fc.VocabularyFileCategoricalColumn._from_config(config)) class VocabularyListCategoricalColumnTest(test.TestCase): def test_defaults_string(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) self.assertEqual('aaa', column.name) self.assertEqual('aaa', column.key) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.string) }, column.parse_example_spec) self.assertTrue(column._is_v2_column) def test_key_should_be_string(self): with self.assertRaisesRegexp(ValueError, 'key must be a string.'): fc.categorical_column_with_vocabulary_list( key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo')) def test_defaults_int(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36)) self.assertEqual('aaa', column.name) self.assertEqual('aaa', column.key) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, 
column.parse_example_spec) @test_util.run_deprecated_v1 def test_all_constructor_args(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32, default_value=-99) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, column.parse_example_spec) @test_util.run_deprecated_v1 def test_deep_copy(self): original = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32) for column in (original, copy.deepcopy(original)): self.assertEqual('aaa', column.name) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int32) }, column.parse_example_spec) def test_invalid_dtype(self): with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'), dtype=dtypes.float32) def test_invalid_mapping_dtype(self): with self.assertRaisesRegexp(ValueError, r'vocabulary dtype must be string or integer'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12., 24., 36.)) def test_mismatched_int_dtype(self): with self.assertRaisesRegexp(ValueError, r'dtype.*and vocabulary dtype.*do not match'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'), dtype=dtypes.int32) def test_mismatched_string_dtype(self): with self.assertRaisesRegexp(ValueError, r'dtype.*and vocabulary dtype.*do not match'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string) def test_none_mapping(self): with self.assertRaisesRegexp(ValueError, r'vocabulary_list.*must be non-empty'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=None) def test_empty_mapping(self): with self.assertRaisesRegexp(ValueError, r'vocabulary_list.*must be non-empty'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=tuple([])) def test_duplicate_mapping(self): with self.assertRaisesRegexp(ValueError, 'Duplicate keys'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 12)) def test_invalid_num_oov_buckets(self): with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1) def test_invalid_buckets_and_default_value(self): with self.assertRaisesRegexp(ValueError, 'both num_oov_buckets and default_value'): fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=100, default_value=2) def test_invalid_input_dtype_int32(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(12, 24, 36), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) def test_invalid_input_dtype_string(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(12, 24, 36)) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'): column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), 
None) @test_util.run_deprecated_v1 def test_parse_example_string(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_parse_example_int(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=(11, 21, 31)) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[11, 21])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a])) self.assertIn('aaa', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=[11, 21], dense_shape=[1, 2]), self.evaluate(features['aaa'])) @test_util.run_deprecated_v1 def test_get_sparse_tensors(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': inputs }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def test_transform_feature(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('marlo', 'skywalker', 'omar'), dense_shape=(2, 2)) id_tensor = fc._transform_features_v2({ 'aaa': inputs }, [column], None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array((2, -1, 0), dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) @test_util.run_deprecated_v1 def test_get_sparse_tensors_dense_input(self): column = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) id_weight_pair = column.get_sparse_tensors( fc.FeatureTransformationCache({ 'aaa': (('marlo', ''), ('skywalker', 'omar')) }), None) self.assertIsNone(id_weight_pair.weight_tensor) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((2, -1, 0), dtype=np.int64), dense_shape=(2, 2)), self.evaluate(id_weight_pair.id_tensor)) @test_util.run_deprecated_v1 def 
  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 2, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_oov_buckets(self):
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 33, 0, 62), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32(self):
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=np.array((11, 100, 30, 22), dtype=np.int32),
        dense_shape=(3, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, -1, 0, 4), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_dense_input(self):
    default_value = -100
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa':
                np.array(((11, -1, -1), (100, 30, -1), (-1, -1, 22)),
                         dtype=np.int32)
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1), (2, 2)),
            values=np.array((2, default_value, 0, 4), dtype=np.int64),
            dense_shape=(3, 3)),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        num_oov_buckets=100)
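    # With num_oov_buckets, in-vocabulary values keep ids
    # 0..len(vocabulary_list)-1 (0..4 here) and any other value is hashed
    # into one of the 100 extra buckets, i.e. an id in [5, 105). The exact
    # bucket (60 for the input 100 below) comes from the lookup table's hash
    # function and is stable across runs, which is what makes it assertable.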
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((2, 60, 0, 4), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((wire_column,))
      predictions = model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      })
      wire_var, bias = model.variables
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 2: wire_var[2] = 3
      # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
      self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                          self.evaluate(wire_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(wire_var.assign(((1.,), (2.,), (3.,), (4.,))))
      # 'marlo' -> 2: wire_var[2] = 3
      # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
      self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    wire_column = fc.categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)

    self.assertEqual(['aaa'], wire_column.parents)

    config = wire_column._get_config()
    self.assertEqual({
        'default_value': -1,
        'dtype': 'string',
        'key': 'aaa',
        'num_oov_buckets': 1,
        'vocabulary_list': ('omar', 'stringer', 'marlo')
    }, config)

    self.assertEqual(wire_column,
                     fc.VocabularyListCategoricalColumn._from_config(config))


class IdentityCategoricalColumnTest(test.TestCase):

  def test_constructor(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column.num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column.parse_example_spec)
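    # Identity columns parse as an int64 VarLenFeature and use each input
    # integer directly as its bucket id, so num_buckets doubles as the number
    # of distinct ids the column accepts.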
    self.assertTrue(column._is_v2_column)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc.categorical_column_with_identity(key=('aaa',), num_buckets=3)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column.num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, column.parse_example_spec)

  def test_invalid_num_buckets_zero(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=0)

  def test_invalid_num_buckets_negative(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
      fc.categorical_column_with_identity(key='aaa', num_buckets=-1)

  def test_invalid_default_value_too_small(self):
    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=-1)

  def test_invalid_default_value_too_big(self):
    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
      fc.categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=3)

  def test_invalid_input_dtype(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
      column.get_sparse_tensors(
          fc.FeatureTransformationCache({
              'aaa': inputs
          }), None)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=30)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[11, 21]))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a]))
    self.assertIn('aaa', features)
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([11, 21], dtype=np.int64),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_tensor = fc._transform_features_v2({
        'aaa': inputs
    }, [column], None)[column]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=inputs.dense_shape), self.evaluate(id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': ((0, -1), (1, 0))
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=((0, 0), (1, 0), (1, 1)),
            values=np.array((0, 1, 0), dtype=np.int64),
            dense_shape=(2, 2)),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_small(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    with self.assertRaisesRegexp(errors.OpError,
                                 'assert_greater_or_equal_0'):
      self.evaluate(id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_big(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, 99, 0),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    with self.assertRaisesRegexp(errors.OpError,
                                 'assert_less_than_num_buckets'):
      self.evaluate(id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value(self):
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 99),
        dense_shape=(2, 2))
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=inputs.indices,
            values=np.array((1, 3, 3), dtype=np.int64),
            dense_shape=inputs.dense_shape),
        self.evaluate(id_weight_pair.id_tensor))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
    column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int32)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    inputs = sparse_tensor.SparseTensorValue(
        indices=input_indices,
        values=input_values,
        dense_shape=input_shape)
    id_weight_pair = column.get_sparse_tensors(
        fc.FeatureTransformationCache({
            'aaa': inputs
        }), None)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    with _initialized_session():
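      # The SparseTensor components are placeholders here, so the lookup can
      # only be evaluated once concrete indices/values/shape are fed in; both
      # out-of-range ids below (-1 and 99) collapse to default_value=3.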
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=np.array((2, 2), dtype=np.int64)),
          id_weight_pair.id_tensor.eval(
              feed_dict={
                  input_indices: ((0, 0), (1, 0), (1, 1)),
                  input_values: (1, -1, 99),
                  input_shape: (2, 2),
              }))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column.num_buckets)
    with ops.Graph().as_default():
      model = fc.LinearModel((column,))
      predictions = model({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      })
      weight_var, bias = model.variables
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
      # weight_var[0] = 1
      # weight_var[2] + weight_var[1] = 3+2 = 5
      self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))

  def test_old_linear_model(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column.num_buckets)
    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose((0.,), self.evaluate(bias))
      self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
      self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
      self.evaluate(weight_var.assign(((1.,), (2.,), (3.,))))
      # weight_var[0] = 1
      # weight_var[2] + weight_var[1] = 3+2 = 5
      self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    column = fc.categorical_column_with_identity(key='aaa', num_buckets=3)

    self.assertEqual(['aaa'], column.parents)

    config = column._get_config()
    self.assertEqual({
        'default_value': None,
        'key': 'aaa',
        'number_buckets': 3
    }, config)

    self.assertEqual(column,
                     fc.IdentityCategoricalColumn._from_config(config))


class TransformFeaturesTest(test.TestCase):

  # All transform tests are distributed in column test.
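  # (fc._transform_features_v2 wraps the features dict in a single
  # FeatureTransformationCache, so a column consumed by several others is
  # still only transformed once, and columns are processed in sorted name
  # order.)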
  # Here we only test the multi-column case and naming.
  def test_multi_column(self):
    bucketized_price = fc.bucketized_column(
        fc.numeric_column('price'), boundaries=[0, 2, 4, 6])
    hashed_sparse = fc.categorical_column_with_hash_bucket('wire', 10)
    with ops.Graph().as_default():
      features = {
          'price': [[-1.], [5.]],
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [1, 1]],
                  dense_shape=[2, 2])
      }
      transformed = fc._transform_features_v2(
          features, [bucketized_price, hashed_sparse], None)

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
      self.assertAllEqual([[0], [3]],
                          self.evaluate(transformed[bucketized_price]))
      self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
      self.assertAllEqual([6, 4, 1],
                          self.evaluate(transformed[hashed_sparse].values))

  def test_column_order(self):
    """Tests that columns are transformed in sorted order of their names."""

    class _LoggerColumn(BaseFeatureColumnForTests):

      def __init__(self, name):
        self._name = name

      @property
      def _is_v2_column(self):
        return True

      @property
      def name(self):
        return self._name

      def transform_feature(self, transformation_cache, state_manager):
        self.call_order = call_logger['count']
        call_logger['count'] += 1
        return 'Anything'

      @property
      def parse_example_spec(self):
        pass

    with ops.Graph().as_default():
      column1 = _LoggerColumn('1')
      column2 = _LoggerColumn('2')
      call_logger = {'count': 0}
      fc._transform_features_v2({}, [column1, column2], None)
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)

      call_logger = {'count': 0}
      fc._transform_features_v2({}, [column2, column1], None)
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)


class IndicatorColumnTest(test.TestCase):

  def test_indicator_column(self):
    a = fc.categorical_column_with_hash_bucket('a', 4)
    indicator_a = fc.indicator_column(a)
    self.assertEqual(indicator_a.categorical_column.name, 'a')
    self.assertEqual(indicator_a.name, 'a_indicator')
    self.assertEqual(indicator_a.variable_shape, [1, 4])
    self.assertTrue(indicator_a._is_v2_column)

    b = fc_old._categorical_column_with_hash_bucket('b', hash_bucket_size=100)
    indicator_b = fc.indicator_column(b)
    self.assertEqual(indicator_b.categorical_column.name, 'b')
    self.assertEqual(indicator_b.name, 'b_indicator')
    self.assertEqual(indicator_b.variable_shape, [1, 100])
    self.assertFalse(indicator_b._is_v2_column)

  def test_1D_shape_succeeds(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    transformation_cache = fc.FeatureTransformationCache({
        'animal': ['fox', 'fox']
    })
    output = transformation_cache.get(animal, None)

    self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
                        self.evaluate(output))

  def test_2D_shape_succeeds(self):
    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when
    # ready.
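    # Hash-bucket ids are deterministic: 'fox' hashes to bucket 2 of 4 here,
    # so both sparse entries map to the same one-hot row below.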
    animal = fc.indicator_column(
        fc.categorical_column_with_hash_bucket('animal', 4))
    transformation_cache = fc.FeatureTransformationCache({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0]],
                values=['fox', 'fox'],
                dense_shape=[2, 1])
    })
    output = transformation_cache.get(animal, None)

    self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
                        self.evaluate(output))

  def test_multi_hot(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    transformation_cache = fc.FeatureTransformationCache({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
    })
    output = transformation_cache.get(animal, None)

    self.assertAllEqual([[0., 2., 0., 0.]], self.evaluate(output))

  def test_multi_hot2(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    transformation_cache = fc.FeatureTransformationCache({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
    })
    output = transformation_cache.get(animal, None)

    self.assertAllEqual([[0., 1., 1., 0.]], self.evaluate(output))

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc.categorical_column_with_hash_bucket('a', 4)
    column = fc.indicator_column(a)
    column_copy = copy.deepcopy(column)
    self.assertEqual(column_copy.categorical_column.name, 'a')
    self.assertEqual(column.name, 'a_indicator')
    self.assertEqual(column.variable_shape, [1, 4])

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer']))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a_indicator]))
    self.assertIn('aaa', features)
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([b'omar', b'stringer'], dtype=np.object_),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_transform(self):
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc.indicator_column(a)
    features = {
        'aaa':
            sparse_tensor.SparseTensorValue(
                indices=((0, 0), (1, 0), (1, 1)),
                values=('marlo', 'skywalker', 'omar'),
                dense_shape=(2, 2))
    }
    indicator_tensor = fc._transform_features_v2(features, [a_indicator],
                                                 None)[a_indicator]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    self.assertAllEqual([[0, 0, 1], [1, 0, 0]],
                        self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_weighted_column(self):
    # GitHub issue 12557.
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
        'weights': constant_op.constant([[2., 4., 6., 1.]])
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    self.assertAllEqual([[6., 4., 3.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_weighted_column(self):
    # GitHub issue 12583.
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc.weighted_categorical_column(ids, 'weights')
    indicator = fc.indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    self.assertAllEqual([[0., 4., 2.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_categorical_column(self):
    # GitHub issue 12583.
    ids = fc.categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    indicator = fc.indicator_column(ids)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
    }
    indicator_tensor = fc._transform_features_v2(features, [indicator],
                                                 None)[indicator]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    self.assertAllEqual([[0., 1., 1.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }

      model = fc.LinearModel([animal])
      predictions = model(features)
      weight_var, _ = model.variables
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
      self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  def test_old_linear_model(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }

      predictions = fc_old.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
      self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  def test_old_linear_model_old_categorical(self):
    animal = fc.indicator_column(
        fc_old._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }

      predictions = fc_old.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      # All should be zero-initialized.
      self.assertAllClose([[0.], [0.], [0.], [0.]], self.evaluate(weight_var))
      self.assertAllClose([[0.]], self.evaluate(predictions))
      self.evaluate(weight_var.assign([[1.], [2.], [3.], [4.]]))
      self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc.DenseFeatures([animal])(features)
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    animal = fc.indicator_column(
        fc.categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc_old.input_layer(features, [animal])
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  def test_input_layer_old_categorical(self):
    animal = fc.indicator_column(
        fc_old._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc_old.input_layer(features, [animal])
      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())
      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_serialization(self):
    parent = fc.categorical_column_with_identity('animal', num_buckets=4)
    animal = fc.indicator_column(parent)

    self.assertEqual([parent], animal.parents)

    config = animal._get_config()
    self.assertEqual({
        'categorical_column': {
            'class_name': 'IdentityCategoricalColumn',
            'config': {
                'key': 'animal',
                'default_value': None,
                'number_buckets': 4
            }
        }
    }, config)

    new_animal = fc.IndicatorColumn._from_config(config)
    self.assertEqual(animal, new_animal)
    self.assertIsNot(parent, new_animal.categorical_column)

    new_animal = fc.IndicatorColumn._from_config(
        config, columns_by_name={parent.name: parent})
    self.assertEqual(animal, new_animal)
    self.assertIs(parent, new_animal.categorical_column)


class _TestStateManager(fc.StateManager):

  def __init__(self, trainable=True):
    # Dict of feature_column to a dict of variables.
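    # A minimal sketch of the bookkeeping real state managers perform, e.g.
    # {embedding_column: {'embedding_weights': <tf.Variable ...>}}; variables
    # are created once per (column, name) pair and then reused.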
    self._all_variables = {}
    self._trainable = trainable

  def create_variable(self,
                      feature_column,
                      name,
                      shape,
                      dtype=None,
                      trainable=True,
                      use_resource=True,
                      initializer=None):
    if feature_column not in self._all_variables:
      self._all_variables[feature_column] = {}
    var_dict = self._all_variables[feature_column]
    if name in var_dict:
      return var_dict[name]
    else:
      var = variable_scope.get_variable(
          name=name,
          shape=shape,
          dtype=dtype,
          trainable=self._trainable and trainable,
          use_resource=use_resource,
          initializer=initializer)
      var_dict[name] = var
      return var

  def get_variable(self, feature_column, name):
    if feature_column not in self._all_variables:
      raise ValueError('Do not recognize FeatureColumn.')
    if name in self._all_variables[feature_column]:
      return self._all_variables[feature_column][name]
    raise ValueError('Could not find variable.')


class EmbeddingColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('mean', embedding_column.combiner)
    self.assertIsNone(embedding_column.ckpt_to_load_from)
    self.assertIsNone(embedding_column.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column.max_norm)
    self.assertTrue(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column.parse_example_spec)
    self.assertTrue(embedding_column._is_v2_column)

  def test_is_v2_column(self):
    categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertFalse(embedding_column._is_v2_column)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column.max_norm)
    self.assertFalse(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    original = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    for embedding_column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', embedding_column.categorical_column.name)
      self.assertEqual(3, embedding_column.categorical_column.num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.categorical_column.parse_example_spec)
      self.assertEqual(embedding_dimension, embedding_column.dimension)
      self.assertEqual('my_combiner', embedding_column.combiner)
      self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column.max_norm)
      self.assertFalse(embedding_column.trainable)
      self.assertEqual('aaa_embedding', embedding_column.name)
      self.assertEqual((embedding_dimension,),
                       embedding_column.variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.parse_example_spec)

  @test_util.run_deprecated_v1
  def test_invalid_initializer(self):
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
      fc.embedding_column(
          categorical_column, dimension=2, initializer='not_fn')

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc.categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_embedded = fc.embedding_column(a, dimension=2)
    data = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                'aaa':
                    feature_pb2.Feature(
                        bytes_list=feature_pb2.BytesList(
                            value=[b'omar', b'stringer']))
            }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec_v2([a_embedded]))
    self.assertIn('aaa', features)
    _assert_sparse_tensor_value(
        self,
        sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [0, 1]],
            values=np.array([b'omar', b'stringer'], dtype=np.object_),
            dense_shape=[1, 2]), self.evaluate(features['aaa']))

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    a = fc.categorical_column_with_identity(key='aaa', num_buckets=3)
    a_embedded = fc.embedding_column(a, dimension=2)
    features = {
        'aaa':
            sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=(0, 1, 0),
                dense_shape=(2, 2))
    }
    outputs = fc._transform_features_v2(features, [a, a_embedded], None)
    output_a = outputs[a]
    output_embedded = outputs[a_embedded]
    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())
    _assert_sparse_tensor_value(self, self.evaluate(output_a),
                                self.evaluate(output_embedded))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
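    # Worked example of the 'mean' combiner: example 1 has ids [0, 1], so its
    # embedding is the elementwise mean of [1, 2] and [3, 5], i.e.
    # [(1+3)/2, (2+5)/2] = [2, 3.5]; rows with no ids (example 2) come back
    # as all zeros.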
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_old_categorical(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        fc_old._LazyBuilder({
            'aaa': sparse_input
        }))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_3d(self):
    # Inputs.
    vocabulary_size = 4
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
        values=(2, 0, 1, 1, 2),
        dense_shape=(4, 2, 5))

    # Embedding variable.
    embedding_dimension = 3
    embedding_values = (
        (1., 2., 4.),  # id 0
        (3., 5., 1.),  # id 1
        (7., 11., 2.),  # id 2
        (2., 7., 12.)  # id 3
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
        ((7., 11., 2.), (0., 0., 0.)),
        # example 1, ids [[], [0, 1]], embedding
        # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
        ((0., 0., 0.), (2., 3.5, 2.5)),
        # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
        ((0., 0., 0.), (0., 0., 0.)),
        # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
        ((3., 5., 1.), (7., 11., 2.)),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_placeholder_inputs(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int64)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa':
                sparse_tensor.SparseTensorValue(
                    indices=input_indices,
                    values=input_values,
                    dense_shape=input_shape)
        }), state_manager)

    # Assert expected embedding variable and lookups.
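    # Only one 'embedding_weights' variable is expected: the column owns a
    # single (vocabulary_size, embedding_dimension) table no matter how the
    # sparse input is shaped or fed.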
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    with _initialized_session():
      self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
      self.assertAllEqual(
          expected_lookups,
          embedding_lookup.eval(
              feed_dict={
                  input_indices: sparse_input.indices,
                  input_values: sparse_input.values,
                  input_shape: sparse_input.dense_shape,
              }))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_restore_from_ckpt(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable. The checkpoint file contains _embedding_values.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    ckpt_path = test.test_src_dir_path(
        'python/feature_column/testdata/embedding.ckpt')
    ckpt_tensor = 'my_embedding'

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        ckpt_to_load_from=ckpt_path,
        tensor_name_in_ckpt=ckpt_tensor)
    state_manager = _TestStateManager()
    embedding_column.create_state(state_manager)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column.get_dense_tensor(
        fc.FeatureTransformationCache({
            'aaa': sparse_input
        }), state_manager)

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
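    # A linear model over an embedding column creates three trainable
    # variables: the model bias, the per-dimension linear weights, and the
    # embedding table itself (see expected_var_names below).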
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      model = fc.LinearModel((embedding_column,))
      predictions = model({categorical_column.name: sparse_input})
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(
          np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_dense_features(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    l = fc.DenseFeatures((embedding_column,))
    dense_features = l({'aaa': sparse_input})

    # Assert expected embedding variable and lookups.
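    # DenseFeatures scopes its state under 'dense_features/', so the single
    # embedding table shows up as
    # 'dense_features/aaa_embedding/embedding_weights:0'.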
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('dense_features/aaa_embedding/embedding_weights:0',),
        tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, variables_lib.Variable)
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    self.assertItemsEqual(
        ('dense_features/aaa_embedding/embedding_weights:0',),
        tuple([v.name for v in trainable_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_dense_features_not_trainable(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=False)

    # Provide sparse input and get dense result.
    dense_features = fc.DenseFeatures((embedding_column,))({
        'aaa': sparse_input
    })

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('dense_features/aaa_embedding/embedding_weights:0',),
        tuple([v.name for v in global_vars]))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
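    # fc_old.input_layer is the v1 counterpart of DenseFeatures; the same
    # lookup math applies, but variables are scoped under 'input_layer/'.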
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    feature_layer = fc_old.input_layer({
        'aaa': sparse_input
    }, (embedding_column,))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in trainable_vars]))

    self.evaluate(variables_lib.global_variables_initializer())
    self.evaluate(lookup_ops.tables_initializer())

    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(feature_layer))

  def test_old_linear_model(self):
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(
          np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  def test_old_linear_model_old_categorical(self):
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc_old._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = fc_old.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']

      self.evaluate(variables_lib.global_variables_initializer())
      self.evaluate(lookup_ops.tables_initializer())

      # Predictions with all zero weights.
      self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
      self.assertAllClose(zeros_embedding_values,
                          self.evaluate(embedding_weights))
      self.assertAllClose(
          np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
      self.assertAllClose(
          np.zeros((batch_size, 1)), self.evaluate(predictions))

      # Predictions with all non-zero weights.
      self.evaluate(
          embedding_weights.assign((
              (1., 2.),  # id 0
              (3., 5.),  # id 1
              (7., 11.)  # id 2
          )))
      self.evaluate(linear_weights.assign(((4.,), (6.,))))
      # example 0, ids [2], embedding[0] = [7, 11]
      # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
      # example 2, ids [], embedding[2] = [0, 0]
      # example 3, ids [1], embedding[3] = [3, 5]
      # sum(embeddings * linear_weights)
      # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
      self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                          self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_serialization_with_default_initializer(self):
    # Build columns.
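    # _get_config round-trips a column through a nested dict of plain Python
    # types; _from_config rebuilds an equivalent column and can reuse an
    # existing categorical column when it is supplied via columns_by_name.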
categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_column = fc.embedding_column(categorical_column, dimension=2) self.assertEqual([categorical_column], embedding_column.parents) config = embedding_column._get_config() self.assertEqual({ 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'number_buckets': 3, 'key': 'aaa', 'default_value': None } }, 'ckpt_to_load_from': None, 'combiner': 'mean', 'dimension': 2, 'initializer': { 'class_name': 'TruncatedNormal', 'config': { 'dtype': 'float32', 'stddev': 0.7071067811865475, 'seed': None, 'mean': 0.0 } }, 'max_norm': None, 'tensor_name_in_ckpt': None, 'trainable': True }, config) custom_objects = {'TruncatedNormal': init_ops.TruncatedNormal} new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects) self.assertEqual(embedding_column._get_config(), new_embedding_column._get_config()) self.assertIsNot(categorical_column, new_embedding_column.categorical_column) new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects, columns_by_name={categorical_column.name: categorical_column}) self.assertEqual(embedding_column._get_config(), new_embedding_column._get_config()) self.assertIs(categorical_column, new_embedding_column.categorical_column) @test_util.run_deprecated_v1 def test_serialization_with_custom_initializer(self): def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info raise ValueError('Not expected to be called') # Build columns. categorical_column = fc.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_column = fc.embedding_column( categorical_column, dimension=2, initializer=_initializer) self.assertEqual([categorical_column], embedding_column.parents) config = embedding_column._get_config() self.assertEqual({ 'categorical_column': { 'class_name': 'IdentityCategoricalColumn', 'config': { 'number_buckets': 3, 'key': 'aaa', 'default_value': None } }, 'ckpt_to_load_from': None, 'combiner': 'mean', 'dimension': 2, 'initializer': '_initializer', 'max_norm': None, 'tensor_name_in_ckpt': None, 'trainable': True }, config) custom_objects = { '_initializer': _initializer, } new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects) self.assertEqual(embedding_column, new_embedding_column) self.assertIsNot(categorical_column, new_embedding_column.categorical_column) new_embedding_column = fc.EmbeddingColumn._from_config( config, custom_objects=custom_objects, columns_by_name={categorical_column.name: categorical_column}) self.assertEqual(embedding_column, new_embedding_column) self.assertIs(categorical_column, new_embedding_column.categorical_column) class SharedEmbeddingColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = fc.shared_embedding_columns_v2( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertIsNone(embedding_column_a.max_norm) self.assertIsNone(embedding_column_b.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
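# Note that shared_embedding_columns_v2 returns one embedding column per input categorical column, in input order, each named by appending '_shared_embedding' to its categorical column's key.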
self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b.parse_example_spec) @test_util.run_deprecated_v1 def test_all_constructor_args(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='shared_embedding_collection_name', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(42., embedding_column_a.max_norm) self.assertEqual(42., embedding_column_b.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b.parse_example_spec) @test_util.run_deprecated_v1 def test_deep_copy(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 original_a, _ = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='shared_embedding_collection_name', ckpt_to_load_from='my_ckpt', tensor_name_in_ckpt='my_ckpt_tensor', max_norm=42., trainable=False) for embedding_column_a in (original_a, copy.deepcopy(original_a)): self.assertEqual('aaa', embedding_column_a.categorical_column.name) self.assertEqual(3, embedding_column_a.categorical_column.num_buckets) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.categorical_column.parse_example_spec) self.assertEqual(42., embedding_column_a.max_norm) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a.parse_example_spec) @test_util.run_deprecated_v1 def test_invalid_initializer(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) with self.assertRaisesRegexp(ValueError, 'initializer must be callable'): fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=2, initializer='not_fn') @test_util.run_deprecated_v1 def 
test_incompatible_column_type(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) categorical_column_c = fc.categorical_column_with_hash_bucket( key='ccc', hash_bucket_size=3) with self.assertRaisesRegexp( ValueError, 'all categorical_columns must have the same type.*' 'IdentityCategoricalColumn.*HashedCategoricalColumn'): fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b, categorical_column_c], dimension=2) @test_util.run_deprecated_v1 def test_weighted_categorical_column_ok(self): categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) weighted_categorical_column_a = fc.weighted_categorical_column( categorical_column_a, weight_feature_key='aaa_weights') categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) weighted_categorical_column_b = fc.weighted_categorical_column( categorical_column_b, weight_feature_key='bbb_weights') fc.shared_embedding_columns_v2( [weighted_categorical_column_a, categorical_column_b], dimension=2) fc.shared_embedding_columns_v2( [categorical_column_a, weighted_categorical_column_b], dimension=2) fc.shared_embedding_columns_v2( [weighted_categorical_column_a, weighted_categorical_column_b], dimension=2) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) b = fc.categorical_column_with_vocabulary_list( key='bbb', vocabulary_list=('omar', 'stringer', 'marlo')) a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2) data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'bbb': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'stringer', b'marlo'])), })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_embedded, b_embedded])) self.assertIn('aaa', features) self.assertIn('bbb', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'stringer', b'marlo'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['bbb'])) @test_util.run_deprecated_v1 def test_transform_feature(self): a = fc.categorical_column_with_identity(key='aaa', num_buckets=3) b = fc.categorical_column_with_identity(key='bbb', num_buckets=3) a_embedded, b_embedded = fc.shared_embedding_columns_v2([a, b], dimension=2) features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } outputs = fc._transform_features_v2(features, [a, a_embedded, b, b_embedded], None) output_a = outputs[a] output_a_embedded = outputs[a_embedded] output_b = outputs[b] output_b_embedded = outputs[b_embedded] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value(self, self.evaluate(output_a), self.evaluate(output_a_embedded)) _assert_sparse_tensor_value(self, 
self.evaluate(output_b), self.evaluate(output_b_embedded)) @test_util.run_deprecated_v1 def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups_a = ( # example 0: (7., 11.), # ids [2], embedding = [7, 11] # example 1: (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] ) expected_lookups_b = ( # example 0: (1., 2.), # ids [0], embedding = [1, 2] # example 1: (0., 0.), # ids [], embedding = [0, 0] ) # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) embedding_lookup_b = embedding_column_b.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual(('aaa_bbb_shared_embedding:0',), tuple([v.name for v in global_vars])) embedding_var = global_vars[0] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(embedding_var)) self.assertAllEqual(expected_lookups_a, self.evaluate(embedding_lookup_a)) self.assertAllEqual(expected_lookups_b, self.evaluate(embedding_lookup_b)) @test_util.run_deprecated_v1 def test_get_dense_tensor_placeholder_inputs(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] # Specify shape, because dense input must have rank specified. input_a_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_b_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_features = { 'aaa': input_a_placeholder, 'bbb': input_b_placeholder, } feed_dict = { input_a_placeholder: input_a, input_b_placeholder: input_b, } # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Build columns. 
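# Same two-column setup as test_get_dense_tensor above, but here the inputs are fed through placeholders, so the lookups must also work when the batch dimension is unknown at graph-construction time.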
categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) embedding_lookup_b = embedding_column_b.get_dense_tensor( fc.FeatureTransformationCache(input_features), None) with _initialized_session() as sess: sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict) @test_util.run_deprecated_v1 def test_linear_model(self): # Inputs. batch_size = 2 vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] # Embedding variable. embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): model = fc.LinearModel((embedding_column_a, embedding_column_b)) predictions = model({ categorical_column_a.name: input_a, categorical_column_b.name: input_b }) # Linear weights do not follow the column name. But this is a rare use # case, and fixing it would add too much complexity to the code. expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_shared_embedding/weights:0', 'aaa_bbb_shared_embedding:0', 'linear_model/bbb_shared_embedding/weights:0', ) self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertItemsEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars['aaa_bbb_shared_embedding:0'] linear_weights_a = trainable_vars[ 'linear_model/aaa_shared_embedding/weights:0'] linear_weights_b = trainable_vars[ 'linear_model/bbb_shared_embedding/weights:0'] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) # Predictions with all zero weights. self.assertAllClose(np.zeros((1,)), self.evaluate(bias)) self.assertAllClose(zeros_embedding_values, self.evaluate(embedding_weights)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_a)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_b)) self.assertAllClose(np.zeros((batch_size, 1)), self.evaluate(predictions)) # Predictions with all non-zero weights. self.evaluate( embedding_weights.assign(( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) 
# id 2 ))) self.evaluate(linear_weights_a.assign(((4.,), (6.,)))) # example 0, ids [2], embedding[0] = [7, 11] # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5] # sum(embeddings * linear_weights) # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29] self.evaluate(linear_weights_b.assign(((3.,), (5.,)))) # example 0, ids [0], embedding[0] = [1, 2] # example 1, ids [], embedding[1] = [0, 0] # sum(embeddings * linear_weights) # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0] self.assertAllClose([[94. + 13.], [29.]], self.evaluate(predictions)) def _test_dense_features(self, trainable=True): # Inputs. vocabulary_size = 3 sparse_input_a = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 4)), values=(2, 0, 1), dense_shape=(2, 5)) sparse_input_b = sparse_tensor.SparseTensorValue( # example 0, ids [0] # example 1, ids [] indices=((0, 0),), values=(0,), dense_shape=(2, 5)) sparse_input_c = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 1), (1, 1), (1, 3)), values=(2, 0, 1), dense_shape=(2, 5)) sparse_input_d = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [] indices=((0, 1),), values=(2,), dense_shape=(2, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0: # A ids [2], embedding = [7, 11] # B ids [0], embedding = [1, 2] # C ids [2], embedding = [7, 11] # D ids [2], embedding = [7, 11] (7., 11., 1., 2., 7., 11., 7., 11.), # example 1: # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] # B ids [], embedding = [0, 0] # C ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] # D ids [], embedding = [0, 0] (2., 3.5, 0., 0., 2., 3.5, 0., 0.), ) # Build columns. categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) categorical_column_c = fc.categorical_column_with_identity( key='ccc', num_buckets=vocabulary_size) categorical_column_d = fc.categorical_column_with_identity( key='ddd', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer, trainable=trainable) embedding_column_c, embedding_column_d = fc.shared_embedding_columns_v2( [categorical_column_c, categorical_column_d], dimension=embedding_dimension, initializer=_initializer, trainable=trainable) features = { 'aaa': sparse_input_a, 'bbb': sparse_input_b, 'ccc': sparse_input_c, 'ddd': sparse_input_d } # Provide sparse input and get dense result. dense_features = fc.DenseFeatures( feature_columns=(embedding_column_b, embedding_column_a, embedding_column_c, embedding_column_d))( features) # Assert expected embedding variable and lookups.
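# Two separate shared-embedding groups (aaa/bbb and ccc/ddd) were built above, so exactly two embedding variables are expected.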
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual( ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'], tuple([v.name for v in global_vars])) for v in global_vars: self.assertIsInstance(v, variables_lib.Variable) trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) if trainable: self.assertItemsEqual( ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'], tuple([v.name for v in trainable_vars])) else: self.assertItemsEqual([], tuple([v.name for v in trainable_vars])) shared_embedding_vars = global_vars self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllEqual(embedding_values, self.evaluate(shared_embedding_vars[0])) self.assertAllEqual(expected_lookups, self.evaluate(dense_features)) @test_util.run_deprecated_v1 def test_dense_features(self): self._test_dense_features() @test_util.run_deprecated_v1 def test_dense_features_no_trainable(self): self._test_dense_features(trainable=False) @test_util.run_deprecated_v1 def test_serialization(self): def _initializer(shape, dtype, partition_info): del shape, dtype, partition_info raise ValueError('Not expected to be called') categorical_column_a = fc.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_column_a, embedding_column_b = fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=2, initializer=_initializer) self.assertEqual([categorical_column_a], embedding_column_a.parents) self.assertEqual([categorical_column_b], embedding_column_b.parents) # TODO(rohanj): Add tests for (from|get)_config once implemented class WeightedCategoricalColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column.parse_example_spec) self.assertTrue(column._is_v2_column) def test_is_v2_column(self): column = fc.weighted_categorical_column( categorical_column=fc_old._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') self.assertFalse(column._is_v2_column) @test_util.run_deprecated_v1 def test_deep_copy(self): """Tests deepcopy of weighted_categorical_column.""" original = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') for column in (original, copy.deepcopy(original)): self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual(3, column.num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column.parse_example_spec) def test_invalid_dtype_none(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values', dtype=None) def test_invalid_dtype_string(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity(
key='ids', num_buckets=3), weight_feature_key='values', dtype=dtypes.string) def test_invalid_input_dtype(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') strings = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'Bad dtype'): fc._transform_features_v2({ 'ids': strings, 'values': strings }, (column,), None) def test_column_name_collision(self): with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'): fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='aaa', num_buckets=3), weight_feature_key='aaa').parse_example_spec() def test_missing_weights(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'values is not in features dictionary'): fc._transform_features_v2({'ids': inputs}, (column,), None) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc.categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_weighted = fc.weighted_categorical_column(a, weight_feature_key='weights') data = example_pb2.Example( features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'weights': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=[1., 10.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec_v2([a_weighted])) self.assertIn('aaa', features) self.assertIn('weights', features) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), self.evaluate(features['aaa'])) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([1., 10.], dtype=np.float32), dense_shape=[1, 2]), self.evaluate(features['weights'])) @test_util.run_deprecated_v1 def test_transform_features(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)) weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': inputs, 'values': weights, }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_input(self): column = 
fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': ((0, -1), (1, 0)), 'values': weights, }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((0, 1, 0), dtype=np.int64), dense_shape=(2, 2)), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_weights(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(2, 1, 0), dense_shape=(2, 2)) id_tensor, weight_tensor = fc._transform_features_v2({ 'ids': inputs, 'values': ((.5, 0.), (1., .1)), }, (column,), None)[column] self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((.5, 1., .1), dtype=np.float32), dense_shape=(2, 2)), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_linear_model(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,)) predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }) weight_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_linear_model_mismatched_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, r'Dimensions.*are not compatible'): model = fc.LinearModel((column,)) model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), (1, 1)), 
values=(.5, 11., 1., .1), dense_shape=(2, 2)) }) def test_linear_model_mismatched_dense_values(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,), sparse_combiner='mean') predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }) # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_linear_model_mismatched_dense_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): model = fc.LinearModel((column,)) predictions = model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }) weight_var, bias = model.variables self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model_mismatched_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, r'Dimensions.*are not compatible'): fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), 
(1, 1)), values=(.5, 11., 1., .1), dense_shape=(2, 2)) }, (column,)) def test_old_linear_model_mismatched_dense_values(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }, (column,), sparse_combiner='mean') # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_old_linear_model_mismatched_dense_shape(self): column = fc.weighted_categorical_column( categorical_column=fc.categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_old_linear_model_old_categorical(self): column = fc.weighted_categorical_column( categorical_column=fc_old._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc_old.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(lookup_ops.tables_initializer()) self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) self.evaluate(weight_var.assign(((1.,), (2.,), (3.,)))) # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) # TODO(ptucker): Add test with embedding of weighted categorical. 
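# All the linear-model variants above (fc.LinearModel and fc_old.linear_model, with v1 or v2 identity columns) assert the same weighted-sum arithmetic: example 0 -> 1 * .5 = .5 and example 1 -> 3 * 1. + 2 * .1 = 3.2.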
@test_util.run_deprecated_v1 def test_serialization(self): categorical_column = fc.categorical_column_with_identity( key='ids', num_buckets=3) column = fc.weighted_categorical_column( categorical_column=categorical_column, weight_feature_key='weight') self.assertEqual([categorical_column, 'weight'], column.parents) config = column._get_config() self.assertEqual({ 'categorical_column': { 'config': { 'key': 'ids', 'number_buckets': 3, 'default_value': None }, 'class_name': 'IdentityCategoricalColumn' }, 'dtype': 'float32', 'weight_feature_key': 'weight' }, config) self.assertEqual(column, fc.WeightedCategoricalColumn._from_config(config)) new_column = fc.WeightedCategoricalColumn._from_config( config, columns_by_name={categorical_column.name: categorical_column}) self.assertEqual(column, new_column) self.assertIs(categorical_column, new_column.categorical_column) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/feature_column/feature_column_v2_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for feature_column.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import numpy as np from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.feature_column import feature_column as fc from tensorflow.python.feature_column import feature_column_v2 as fc_new from tensorflow.python.feature_column.feature_column import _CategoricalColumn from tensorflow.python.feature_column.feature_column import _DenseColumn from tensorflow.python.feature_column.feature_column import _FeatureColumn from tensorflow.python.feature_column.feature_column import _LazyBuilder from tensorflow.python.feature_column.feature_column import _LinearModel from tensorflow.python.feature_column.feature_column import _transform_features from tensorflow.python.feature_column.feature_column import InputLayer from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test def _initialized_session(config=None): sess = session.Session(config=config) sess.run(variables_lib.global_variables_initializer()) sess.run(lookup_ops.tables_initializer()) return sess class LazyColumnTest(test.TestCase): def test_transformations_called_once(self): class TransformCounter(_FeatureColumn): def __init__(self): self.num_transform = 0 @property def name(self): return 'TransformCounter' def _transform_feature(self, cache): self.num_transform += 1 # Count transform calls.
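# The builder memoizes per-column output, so this increment is expected to run only once even when the column is fetched repeatedly (asserted below).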
return cache.get('a') @property def _parse_example_spec(self): pass builder = _LazyBuilder(features={'a': [[2], [3.]]}) column = TransformCounter() self.assertEqual(0, column.num_transform) builder.get(column) self.assertEqual(1, column.num_transform) builder.get(column) self.assertEqual(1, column.num_transform) def test_returns_transform_output(self): class Transformer(_FeatureColumn): @property def name(self): return 'Transformer' def _transform_feature(self, cache): return 'Output' @property def _parse_example_spec(self): pass builder = _LazyBuilder(features={'a': [[2], [3.]]}) column = Transformer() self.assertEqual('Output', builder.get(column)) self.assertEqual('Output', builder.get(column)) def test_does_not_pollute_given_features_dict(self): class Transformer(_FeatureColumn): @property def name(self): return 'Transformer' def _transform_feature(self, cache): return 'Output' @property def _parse_example_spec(self): pass features = {'a': [[2], [3.]]} builder = _LazyBuilder(features=features) builder.get(Transformer()) self.assertEqual(['a'], list(features.keys())) def test_error_if_feature_is_not_found(self): builder = _LazyBuilder(features={'a': [[2], [3.]]}) with self.assertRaisesRegexp(ValueError, 'bbb is not in features dictionary'): builder.get('bbb') with self.assertRaisesRegexp(ValueError, 'bbb is not in features dictionary'): builder.get(u'bbb') def test_not_supported_feature_column(self): class NotAProperColumn(_FeatureColumn): @property def name(self): return 'NotAProperColumn' def _transform_feature(self, cache): # A proper column should return a non-None value here. pass @property def _parse_example_spec(self): pass builder = _LazyBuilder(features={'a': [[2], [3.]]}) with self.assertRaisesRegexp(ValueError, 'NotAProperColumn is not supported'): builder.get(NotAProperColumn()) def test_key_should_be_string_or_feature_column(self): class NotAFeatureColumn(object): pass builder = _LazyBuilder(features={'a': [[2], [3.]]}) with self.assertRaisesRegexp( TypeError, '"key" must be either a "str" or "_FeatureColumn".'): builder.get(NotAFeatureColumn()) @test_util.run_deprecated_v1 def test_expand_dim_rank_1_sparse_tensor_empty_batch(self): # empty 1-D sparse tensor: builder = _LazyBuilder(features={'a': sparse_tensor.SparseTensor( indices=np.reshape(np.array([], dtype=np.int64), (0, 1)), dense_shape=[0], values=np.array([]))}) with self.cached_session(): spv = builder.get('a').eval() self.assertAllEqual(np.array([0, 1], dtype=np.int64), spv.dense_shape) self.assertAllEqual( np.reshape(np.array([], dtype=np.int64), (0, 2)), spv.indices) class NumericColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): a = fc._numeric_column('aaa') self.assertEqual('aaa', a.key) self.assertEqual('aaa', a.name) self.assertEqual('aaa', a._var_scope_name) self.assertEqual((1,), a.shape) self.assertIsNone(a.default_value) self.assertEqual(dtypes.float32, a.dtype) self.assertIsNone(a.normalizer_fn) def test_key_should_be_string(self): with self.assertRaisesRegexp(ValueError, 'key must be a string.'): fc._numeric_column(key=('aaa',)) def test_shape_saved_as_tuple(self): a = fc._numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]]) self.assertEqual((1, 2), a.shape) def test_default_value_saved_as_tuple(self): a = fc._numeric_column('aaa', default_value=4.)
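# Scalar and nested-list default values are both normalized to nested tuples, as the asserts below show.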
self.assertEqual((4.,), a.default_value) a = fc._numeric_column('aaa', shape=[1, 2], default_value=[[3, 2.]]) self.assertEqual(((3., 2.),), a.default_value) def test_shape_and_default_value_compatibility(self): fc._numeric_column('aaa', shape=[2], default_value=[1, 2.]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc._numeric_column('aaa', shape=[2], default_value=[1, 2, 3.]) fc._numeric_column( 'aaa', shape=[3, 2], default_value=[[2, 3], [1, 2], [2, 3.]]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc._numeric_column( 'aaa', shape=[3, 1], default_value=[[2, 3], [1, 2], [2, 3.]]) with self.assertRaisesRegexp(ValueError, 'The shape of default_value'): fc._numeric_column( 'aaa', shape=[3, 3], default_value=[[2, 3], [1, 2], [2, 3.]]) def test_default_value_type_check(self): fc._numeric_column( 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.float32) fc._numeric_column( 'aaa', shape=[2], default_value=[1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp(TypeError, 'must be compatible with dtype'): fc._numeric_column( 'aaa', shape=[2], default_value=[1, 2.], dtype=dtypes.int32) with self.assertRaisesRegexp(TypeError, 'default_value must be compatible with dtype'): fc._numeric_column('aaa', default_value=['string']) def test_shape_must_be_positive_integer(self): with self.assertRaisesRegexp(TypeError, 'shape dimensions must be integer'): fc._numeric_column( 'aaa', shape=[ 1.0, ]) with self.assertRaisesRegexp(ValueError, 'shape dimensions must be greater than 0'): fc._numeric_column( 'aaa', shape=[ 0, ]) def test_dtype_is_convertible_to_float(self): with self.assertRaisesRegexp(ValueError, 'dtype must be convertible to float'): fc._numeric_column('aaa', dtype=dtypes.string) def test_scalar_default_value_fills_the_shape(self): a = fc._numeric_column('aaa', shape=[2, 3], default_value=2.) self.assertEqual(((2., 2., 2.), (2., 2., 2.)), a.default_value) def test_parse_spec(self): a = fc._numeric_column('aaa', shape=[2, 3], dtype=dtypes.int32) self.assertEqual({ 'aaa': parsing_ops.FixedLenFeature((2, 3), dtype=dtypes.int32) }, a._parse_example_spec) @test_util.run_deprecated_v1 def test_parse_example_no_default_value(self): price = fc._numeric_column('price', shape=[2]) data = example_pb2.Example(features=feature_pb2.Features( feature={ 'price': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=[20., 110.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec([price])) self.assertIn('price', features) with self.cached_session(): self.assertAllEqual([[20., 110.]], features['price'].eval()) @test_util.run_deprecated_v1 def test_parse_example_with_default_value(self): price = fc._numeric_column('price', shape=[2], default_value=11.) 
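# The 'no_data' example below has no 'price' feature, so parsing falls back to the default_value, yielding [11., 11.] for that row.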
data = example_pb2.Example(features=feature_pb2.Features( feature={ 'price': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=[20., 110.])) })) no_data = example_pb2.Example(features=feature_pb2.Features( feature={ 'something_else': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=[20., 110.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString(), no_data.SerializeToString()], features=fc.make_parse_example_spec([price])) self.assertIn('price', features) with self.cached_session(): self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval()) def test_normalizer_fn_must_be_callable(self): with self.assertRaisesRegexp(TypeError, 'must be a callable'): fc._numeric_column('price', normalizer_fn='NotACallable') @test_util.run_deprecated_v1 def test_normalizer_fn_transform_feature(self): def _increment_two(input_tensor): return input_tensor + 2. price = fc._numeric_column('price', shape=[2], normalizer_fn=_increment_two) output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price]) with self.cached_session(): self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval()) @test_util.run_deprecated_v1 def test_get_dense_tensor(self): def _increment_two(input_tensor): return input_tensor + 2. price = fc._numeric_column('price', shape=[2], normalizer_fn=_increment_two) builder = _LazyBuilder({'price': [[1., 2.], [5., 6.]]}) self.assertEqual(builder.get(price), price._get_dense_tensor(builder)) def test_sparse_tensor_not_supported(self): price = fc._numeric_column('price') builder = _LazyBuilder({ 'price': sparse_tensor.SparseTensor( indices=[[0, 0]], values=[0.3], dense_shape=[1, 1]) }) with self.assertRaisesRegexp(ValueError, 'must be a Tensor'): price._transform_feature(builder) @test_util.run_deprecated_v1 def test_deep_copy(self): a = fc._numeric_column('aaa', shape=[1, 2], default_value=[[3., 2.]]) a_copy = copy.deepcopy(a) self.assertEqual(a_copy.name, 'aaa') self.assertEqual(a_copy.shape, (1, 2)) self.assertEqual(a_copy.default_value, ((3., 2.),)) def test_numpy_default_value(self): a = fc._numeric_column( 'aaa', shape=[1, 2], default_value=np.array([[3., 2.]])) self.assertEqual(a.default_value, ((3., 2.),)) @test_util.run_deprecated_v1 def test_linear_model(self): price = fc._numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} predictions = fc.linear_model(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.]], self.evaluate(price_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price_var.assign([[10.]])) self.assertAllClose([[10.], [50.]], self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_keras_linear_model(self): price = fc._numeric_column('price') with ops.Graph().as_default(): features = {'price': [[1.], [5.]]} predictions = get_keras_linear_model_predictions(features, [price]) bias = get_linear_model_bias() price_var = get_linear_model_column_var(price) with _initialized_session() as sess: self.assertAllClose([0.], self.evaluate(bias)) self.assertAllClose([[0.]], self.evaluate(price_var)) self.assertAllClose([[0.], [0.]], self.evaluate(predictions)) sess.run(price_var.assign([[10.]])) self.assertAllClose([[10.], [50.]], self.evaluate(predictions)) class BucketizedColumnTest(test.TestCase): def test_invalid_source_column_type(self): a = fc._categorical_column_with_hash_bucket('aaa', 
hash_bucket_size=10) with self.assertRaisesRegexp( ValueError, 'source_column must be a column generated with numeric_column'): fc._bucketized_column(a, boundaries=[0, 1]) def test_invalid_source_column_shape(self): a = fc._numeric_column('aaa', shape=[2, 3]) with self.assertRaisesRegexp( ValueError, 'source_column must be one-dimensional column'): fc._bucketized_column(a, boundaries=[0, 1]) def test_invalid_boundaries(self): a = fc._numeric_column('aaa') with self.assertRaisesRegexp( ValueError, 'boundaries must be a sorted list'): fc._bucketized_column(a, boundaries=None) with self.assertRaisesRegexp( ValueError, 'boundaries must be a sorted list'): fc._bucketized_column(a, boundaries=1.) with self.assertRaisesRegexp( ValueError, 'boundaries must be a sorted list'): fc._bucketized_column(a, boundaries=[1, 0]) with self.assertRaisesRegexp( ValueError, 'boundaries must be a sorted list'): fc._bucketized_column(a, boundaries=[1, 1]) def test_name(self): a = fc._numeric_column('aaa', dtype=dtypes.int32) b = fc._bucketized_column(a, boundaries=[0, 1]) self.assertEqual('aaa_bucketized', b.name) def test_var_scope_name(self): a = fc._numeric_column('aaa', dtype=dtypes.int32) b = fc._bucketized_column(a, boundaries=[0, 1]) self.assertEqual('aaa_bucketized', b._var_scope_name) def test_parse_spec(self): a = fc._numeric_column('aaa', shape=[2], dtype=dtypes.int32) b = fc._bucketized_column(a, boundaries=[0, 1]) self.assertEqual({ 'aaa': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32) }, b._parse_example_spec) def test_variable_shape(self): a = fc._numeric_column('aaa', shape=[2], dtype=dtypes.int32) b = fc._bucketized_column(a, boundaries=[0, 1]) # Column 'aaa' has shape [2] times three buckets -> variable_shape=[2, 3]. self.assertAllEqual((2, 3), b._variable_shape) def test_num_buckets(self): a = fc._numeric_column('aaa', shape=[2], dtype=dtypes.int32) b = fc._bucketized_column(a, boundaries=[0, 1]) # Column 'aaa' has shape [2] times three buckets -> num_buckets=6. self.assertEqual(6, b._num_buckets) @test_util.run_deprecated_v1 def test_parse_example(self): price = fc._numeric_column('price', shape=[2]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 50]) data = example_pb2.Example(features=feature_pb2.Features( feature={ 'price': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=[20., 110.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec([bucketized_price])) self.assertIn('price', features) with self.cached_session(): self.assertAllEqual([[20., 110.]], features['price'].eval()) @test_util.run_deprecated_v1 def test_transform_feature(self): price = fc._numeric_column('price', shape=[2]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6]) with ops.Graph().as_default(): transformed_tensor = _transform_features({ 'price': [[-1., 1.], [5., 6.]] }, [bucketized_price]) with _initialized_session(): self.assertAllEqual([[0, 1], [3, 4]], transformed_tensor[bucketized_price].eval()) def test_get_dense_tensor_one_input_value(self): """Tests _get_dense_tensor() for input with shape=[1].""" price = fc._numeric_column('price', shape=[1]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6]) with ops.Graph().as_default(): builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]}) with _initialized_session(): bucketized_price_tensor = bucketized_price._get_dense_tensor(builder) self.assertAllClose( # One-hot tensor.
[[[1., 0., 0., 0., 0.]], [[0., 1., 0., 0., 0.]], [[0., 0., 0., 1., 0.]], [[0., 0., 0., 0., 1.]]], self.evaluate(bucketized_price_tensor)) def test_get_dense_tensor_two_input_values(self): """Tests _get_dense_tensor() for input with shape=[2].""" price = fc._numeric_column('price', shape=[2]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6]) with ops.Graph().as_default(): builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]}) with _initialized_session(): bucketized_price_tensor = bucketized_price._get_dense_tensor(builder) self.assertAllClose( # One-hot tensor. [[[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.]], [[0., 0., 0., 1., 0.], [0., 0., 0., 0., 1.]]], self.evaluate(bucketized_price_tensor)) def test_get_sparse_tensors_one_input_value(self): """Tests _get_sparse_tensors() for input with shape=[1].""" price = fc._numeric_column('price', shape=[1]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6]) with ops.Graph().as_default(): builder = _LazyBuilder({'price': [[-1.], [1.], [5.], [6.]]}) with _initialized_session() as sess: id_weight_pair = bucketized_price._get_sparse_tensors(builder) self.assertIsNone(id_weight_pair.weight_tensor) id_tensor_value = sess.run(id_weight_pair.id_tensor) self.assertAllEqual( [[0, 0], [1, 0], [2, 0], [3, 0]], id_tensor_value.indices) self.assertAllEqual([0, 1, 3, 4], id_tensor_value.values) self.assertAllEqual([4, 1], id_tensor_value.dense_shape) def test_get_sparse_tensors_two_input_values(self): """Tests _get_sparse_tensors() for input with shape=[2].""" price = fc._numeric_column('price', shape=[2]) bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6]) with ops.Graph().as_default(): builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]}) with _initialized_session() as sess: id_weight_pair = bucketized_price._get_sparse_tensors(builder) self.assertIsNone(id_weight_pair.weight_tensor) id_tensor_value = sess.run(id_weight_pair.id_tensor) self.assertAllEqual( [[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices) # Values 0-4 correspond to the first column of the input price. # Values 5-9 correspond to the second column of the input price. 
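# With boundaries [0, 2, 4, 6]: -1. -> bucket 0; 1. -> bucket 1 (offset by 5 -> 6); 5. -> bucket 3; 6. -> bucket 4 (offset by 5 -> 9).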

  def test_get_sparse_tensors_two_input_values(self):
    """Tests _get_sparse_tensors() for input with shape=[2]."""
    price = fc._numeric_column('price', shape=[2])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      builder = _LazyBuilder({'price': [[-1., 1.], [5., 6.]]})
      with _initialized_session() as sess:
        id_weight_pair = bucketized_price._get_sparse_tensors(builder)
        self.assertIsNone(id_weight_pair.weight_tensor)
        id_tensor_value = sess.run(id_weight_pair.id_tensor)
        self.assertAllEqual(
            [[0, 0], [0, 1], [1, 0], [1, 1]], id_tensor_value.indices)
        # Values 0-4 correspond to the first column of the input price.
        # Values 5-9 correspond to the second column of the input price.
        self.assertAllEqual([0, 6, 3, 9], id_tensor_value.values)
        self.assertAllEqual([2, 2], id_tensor_value.dense_shape)

  def test_sparse_tensor_input_not_supported(self):
    price = fc._numeric_column('price')
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 1])
    builder = _LazyBuilder({
        'price':
            sparse_tensor.SparseTensor(
                indices=[[0, 0]], values=[0.3], dense_shape=[1, 1])
    })
    with self.assertRaisesRegexp(ValueError, 'must be a Tensor'):
      bucketized_price._transform_feature(builder)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc._numeric_column('aaa', shape=[2])
    a_bucketized = fc._bucketized_column(a, boundaries=[0, 1])
    a_bucketized_copy = copy.deepcopy(a_bucketized)
    self.assertEqual(a_bucketized_copy.name, 'aaa_bucketized')
    self.assertAllEqual(a_bucketized_copy._variable_shape, (2, 3))
    self.assertEqual(a_bucketized_copy.boundaries, (0, 1))

  def test_linear_model_one_input_value(self):
    """Tests linear_model() for input with shape=[1]."""
    price = fc._numeric_column('price', shape=[1])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))
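
  # The linear model looks up one weight per bucket id and adds the bias:
  # price -1. falls in bucket 0, so after the assignments above its
  # prediction is 10. + 1. = 11., matching the final check.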

  def test_linear_model_two_input_values(self):
    """Tests linear_model() for input with shape=[2]."""
    price = fc._numeric_column('price', shape=[2])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = fc.linear_model(features, [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(bucketized_price_var.assign(
            [[10.], [20.], [30.], [40.], [50.],
             [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))
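
  # With a shape=[2] input there are 2 * 5 = 10 weights; each example sums
  # the weights of its two bucket ids: 10. + 70. = 80. and 40. + 100. = 140.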

  def test_keras_linear_model_one_input_value(self):
    """Tests _LinearModel for input with shape=[1]."""
    price = fc._numeric_column('price', shape=[1])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1.], [1.], [5.], [6.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight variable per bucket, all initialized to zero.
        self.assertAllClose([[0.], [0.], [0.], [0.], [0.]],
                            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.]]))
        # price -1. is in the 0th bucket, whose weight is 10.
        # price 1. is in the 1st bucket, whose weight is 20.
        # price 5. is in the 3rd bucket, whose weight is 40.
        # price 6. is in the 4th bucket, whose weight is 50.
        self.assertAllClose([[10.], [20.], [40.], [50.]],
                            self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[11.], [21.], [41.], [51.]],
                            self.evaluate(predictions))

  def test_keras_linear_model_two_input_values(self):
    """Tests _LinearModel for input with shape=[2]."""
    price = fc._numeric_column('price', shape=[2])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 2, 4, 6])
    with ops.Graph().as_default():
      features = {'price': [[-1., 1.], [5., 6.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [bucketized_price])
      bias = get_linear_model_bias()
      bucketized_price_var = get_linear_model_column_var(bucketized_price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        # One weight per bucket per input column, all initialized to zero.
        self.assertAllClose(
            [[0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.], [0.]],
            self.evaluate(bucketized_price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(
            bucketized_price_var.assign([[10.], [20.], [30.], [40.], [50.],
                                         [60.], [70.], [80.], [90.], [100.]]))
        # 1st example:
        #   price -1. is in the 0th bucket, whose weight is 10.
        #   price 1. is in the 6th bucket, whose weight is 70.
        # 2nd example:
        #   price 5. is in the 3rd bucket, whose weight is 40.
        #   price 6. is in the 9th bucket, whose weight is 100.
        self.assertAllClose([[80.], [140.]], self.evaluate(predictions))
        sess.run(bias.assign([1.]))
        self.assertAllClose([[81.], [141.]], self.evaluate(predictions))


class HashedCategoricalColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    a = fc._categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual('aaa', a.name)
    self.assertEqual('aaa', a._var_scope_name)
    self.assertEqual('aaa', a.key)
    self.assertEqual(10, a.hash_bucket_size)
    self.assertEqual(dtypes.string, a.dtype)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc._categorical_column_with_hash_bucket(('key',), 10)

  def test_bucket_size_should_be_given(self):
    with self.assertRaisesRegexp(ValueError, 'hash_bucket_size must be set.'):
      fc._categorical_column_with_hash_bucket('aaa', None)

  def test_bucket_size_should_be_positive(self):
    with self.assertRaisesRegexp(ValueError,
                                 'hash_bucket_size must be at least 1'):
      fc._categorical_column_with_hash_bucket('aaa', 0)

  def test_dtype_should_be_string_or_integer(self):
    fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.string)
    fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.float32)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc._categorical_column_with_hash_bucket('aaa', 10)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(10, column.hash_bucket_size)
      self.assertEqual(10, column._num_buckets)
      self.assertEqual(dtypes.string, column.dtype)

  def test_parse_spec_string(self):
    a = fc._categorical_column_with_hash_bucket('aaa', 10)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, a._parse_example_spec)

  def test_parse_spec_int(self):
    a = fc._categorical_column_with_hash_bucket('aaa', 10, dtype=dtypes.int32)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, a._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc._categorical_column_with_hash_bucket('aaa', 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())
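
  # The hashed ids asserted in the tests below are pinned to the current hash
  # implementation (presumably string_to_hash_bucket_fast modulo
  # hash_bucket_size), so any change to the hash function breaks them by
  # design, as the in-test comments note.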

  @test_util.run_deprecated_v1
  def test_strings_should_be_hashed(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    outputs = _transform_features({'wire': wire_tensor}, [hashed_sparse])
    output = outputs[hashed_sparse]
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [6, 4, 1]
    with self.cached_session():
      self.assertEqual(dtypes.int64, output.values.dtype)
      self.assertAllEqual(expected_values, output.values.eval())
      self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
      self.assertAllEqual(wire_tensor.dense_shape.eval(),
                          output.dense_shape.eval())

  def test_tensor_dtype_should_be_string_or_integer(self):
    string_fc = fc._categorical_column_with_hash_bucket(
        'a_string', 10, dtype=dtypes.string)
    int_fc = fc._categorical_column_with_hash_bucket(
        'a_int', 10, dtype=dtypes.int32)
    float_fc = fc._categorical_column_with_hash_bucket(
        'a_float', 10, dtype=dtypes.string)
    int_tensor = sparse_tensor.SparseTensor(
        values=[101], indices=[[0, 0]], dense_shape=[1, 1])
    string_tensor = sparse_tensor.SparseTensor(
        values=['101'], indices=[[0, 0]], dense_shape=[1, 1])
    float_tensor = sparse_tensor.SparseTensor(
        values=[101.], indices=[[0, 0]], dense_shape=[1, 1])
    builder = _LazyBuilder({
        'a_int': int_tensor,
        'a_string': string_tensor,
        'a_float': float_tensor
    })
    builder.get(string_fc)
    builder.get(int_fc)
    with self.assertRaisesRegexp(ValueError, 'dtype must be string or integer'):
      builder.get(float_fc)

  def test_dtype_should_match_with_tensor(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
    builder = _LazyBuilder({'wire': wire_tensor})
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      builder.get(hashed_sparse)

  @test_util.run_deprecated_v1
  def test_ints_should_be_hashed(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=[101, 201, 301],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.cached_session():
      self.assertAllEqual(expected_values, output.values.eval())
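
  # Integer inputs appear to be hashed via their string representation, which
  # is why the int32 tensor in the next test yields the same ids [3, 7, 5] as
  # the int64 values above.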

  @test_util.run_deprecated_v1
  def test_int32_64_is_compatible(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket(
        'wire', 10, dtype=dtypes.int64)
    wire_tensor = sparse_tensor.SparseTensor(
        values=constant_op.constant([101, 201, 301], dtype=dtypes.int32),
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    builder = _LazyBuilder({'wire': wire_tensor})
    output = builder.get(hashed_sparse)
    # Check exact hashed output. If hashing changes this test will break.
    expected_values = [3, 7, 5]
    with self.cached_session():
      self.assertAllEqual(expected_values, output.values.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({
        'wire':
            sparse_tensor.SparseTensor(
                values=['omar', 'stringer', 'marlo'],
                indices=[[0, 0], [1, 0], [1, 1]],
                dense_shape=[2, 2])
    })
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  def test_get_sparse_tensors_weight_collections(self):
    column = fc._categorical_column_with_hash_bucket('aaa', 10)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))

    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10)
    builder = _LazyBuilder({'wire': (('omar', ''), ('stringer', 'marlo'))})
    id_weight_pair = hashed_sparse._get_sparse_tensors(builder)
    self.assertIsNone(id_weight_pair.weight_tensor)
    self.assertEqual(builder.get(hashed_sparse), id_weight_pair.id_tensor)

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    wire_column = fc._categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 3: wire_var[3] = 4
        # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
        self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    wire_column = fc._categorical_column_with_hash_bucket('wire', 4)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name: sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=('marlo', 'skywalker', 'omar'),
              dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 3: wire_var[3] = 4
        # 'skywalker' -> 2, 'omar' -> 2: wire_var[2] + wire_var[2] = 3+3 = 6
        self.assertAllClose(((4.,), (6.,)), self.evaluate(predictions))
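
  # With the default sparse_combiner='sum', ids that repeat within one example
  # add their weights: 'skywalker' and 'omar' both hash to 2 in the two tests
  # above, so the second example scores wire_var[2] + wire_var[2] = 6.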


class CrossedColumnTest(test.TestCase):

  def test_keys_empty(self):
    with self.assertRaisesRegexp(
        ValueError, 'keys must be a list with length > 1'):
      fc._crossed_column([], 10)

  def test_keys_length_one(self):
    with self.assertRaisesRegexp(
        ValueError, 'keys must be a list with length > 1'):
      fc._crossed_column(['a'], 10)

  def test_key_type_unsupported(self):
    with self.assertRaisesRegexp(ValueError, 'Unsupported key type'):
      fc._crossed_column(['a', fc._numeric_column('c')], 10)

    with self.assertRaisesRegexp(
        ValueError, 'categorical_column_with_hash_bucket is not supported'):
      fc._crossed_column(
          ['a', fc._categorical_column_with_hash_bucket('c', 10)], 10)

  def test_hash_bucket_size_negative(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc._crossed_column(['a', 'c'], -1)

  def test_hash_bucket_size_zero(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc._crossed_column(['a', 'c'], 0)

  def test_hash_bucket_size_none(self):
    with self.assertRaisesRegexp(
        ValueError, 'hash_bucket_size must be > 1'):
      fc._crossed_column(['a', 'c'], None)

  def test_name(self):
    a = fc._numeric_column('a', dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc._crossed_column(['d1', 'd2'], 10)
    crossed2 = fc._crossed_column([b, 'c', crossed1], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc._numeric_column('a', dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc._crossed_column(['d1', 'd2'], 10)
    crossed2 = fc._crossed_column([crossed1, 'c', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_name_leaf_keys_ordered_alphabetically(self):
    """Tests that the name does not depend on the order of given columns."""
    a = fc._numeric_column('a', dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc._crossed_column(['d2', 'c'], 10)
    crossed2 = fc._crossed_column([crossed1, 'd1', b], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2.name)

  def test_var_scope_name(self):
    a = fc._numeric_column('a', dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc._crossed_column(['d1', 'd2'], 10)
    crossed2 = fc._crossed_column([b, 'c', crossed1], 10)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2._var_scope_name)

  def test_parse_spec(self):
    a = fc._numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed = fc._crossed_column([b, 'c'], 10)
    self.assertEqual({
        'a': parsing_ops.FixedLenFeature((2,), dtype=dtypes.int32),
        'c': parsing_ops.VarLenFeature(dtypes.string),
    }, crossed._parse_example_spec)

  def test_num_buckets(self):
    a = fc._numeric_column('a', shape=[2], dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed = fc._crossed_column([b, 'c'], 15)
    self.assertEqual(15, crossed._num_buckets)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc._numeric_column('a', dtype=dtypes.int32)
    b = fc._bucketized_column(a, boundaries=[0, 1])
    crossed1 = fc._crossed_column(['d1', 'd2'], 10)
    crossed2 = fc._crossed_column([b, 'c', crossed1], 15, hash_key=5)
    crossed2_copy = copy.deepcopy(crossed2)
    self.assertEqual('a_bucketized_X_c_X_d1_X_d2', crossed2_copy.name)
    self.assertEqual(15, crossed2_copy.hash_bucket_size)
    self.assertEqual(5, crossed2_copy.hash_key)
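
  # A crossed column's name is built from its alphabetically sorted leaf keys
  # joined by '_X_', which is why the three naming tests above all expect
  # 'a_bucketized_X_c_X_d1_X_d2' regardless of the order the keys are given.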

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    price = fc._numeric_column('price', shape=[2])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 50])
    price_cross_wire = fc._crossed_column([bucketized_price, 'wire'], 10)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'price':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[20., 110.])),
            'wire':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer'])),
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([price_cross_wire]))
    self.assertIn('price', features)
    self.assertIn('wire', features)
    with self.cached_session():
      self.assertAllEqual([[20., 110.]], features['price'].eval())
      wire_sparse = features['wire']
      self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
      # Use byte constants to pass the open-source test.
      self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
      self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    price = fc._numeric_column('price', shape=[2])
    bucketized_price = fc._bucketized_column(price, boundaries=[0, 50])
    hash_bucket_size = 10
    price_cross_wire = fc._crossed_column([bucketized_price, 'wire'],
                                          hash_bucket_size)
    features = {
        'price': constant_op.constant([[1., 2.], [5., 6.]]),
        'wire': sparse_tensor.SparseTensor(
            values=['omar', 'stringer', 'marlo'],
            indices=[[0, 0], [1, 0], [1, 1]],
            dense_shape=[2, 2]),
    }
    outputs = _transform_features(features, [price_cross_wire])
    output = outputs[price_cross_wire]
    with self.cached_session() as sess:
      output_val = self.evaluate(output)
      self.assertAllEqual(
          [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2], [1, 3]], output_val.indices)
      for val in output_val.values:
        self.assertIn(val, list(range(hash_bucket_size)))
      self.assertAllEqual([2, 4], output_val.dense_shape)
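
  # The cross of an example's categories is their Cartesian product: the
  # first example above has 2 bucketized values x 1 wire value = 2 crossed
  # ids, the second has 2 x 2 = 4, hence dense_shape [2, 4].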

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc._bucketized_column(a, boundaries=(0, 1))
    crossed1 = fc._crossed_column(['d1', 'd2'], 10)
    crossed2 = fc._crossed_column([b, 'c', crossed1], 15, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
          'd1':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d1A', 'd1B', 'd1C'],
                  dense_shape=(2, 2)),
          'd2':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['d2A', 'd2B', 'd2C'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed2._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        self.assertAllEqual(
            ((0, 0), (0, 1),
             (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7),
             (1, 8), (1, 9), (1, 10), (1, 11), (1, 12), (1, 13), (1, 14),
             (1, 15)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (
            6, 14, 0, 13, 8, 8, 10, 12, 2, 0, 1, 9, 8, 12, 2, 0, 10, 11)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 16), id_tensor_eval.dense_shape)

  def test_get_sparse_tensors_simple(self):
    """Same as test_get_sparse_tensors, but with simpler values."""
    a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc._bucketized_column(a, boundaries=(0, 1))
    crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      builder = _LazyBuilder({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      })
      id_weight_pair = crossed._get_sparse_tensors(builder)
      with _initialized_session():
        id_tensor_eval = id_weight_pair.id_tensor.eval()
        self.assertAllEqual(
            ((0, 0), (0, 1), (1, 0), (1, 1), (1, 2), (1, 3)),
            id_tensor_eval.indices)
        # Check exact hashed output. If hashing changes this test will break.
        # All values are within [0, hash_bucket_size).
        expected_values = (1, 0, 1, 3, 4, 2)
        self.assertAllEqual(expected_values, id_tensor_eval.values)
        self.assertAllEqual((2, 4), id_tensor_eval.dense_shape)

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    """Tests linear_model.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc._bucketized_column(a, boundaries=(0, 1))
    crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          'a': constant_op.constant(((-1., .5), (.5, 1.))),
          'c': sparse_tensor.SparseTensor(
              indices=((0, 0), (1, 0), (1, 1)),
              values=['cA', 'cB', 'cC'],
              dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(crossed_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))
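
  # With crossed ids (1, 0) for the first example and (1, 3, 4, 2) for the
  # second, the assigned weights sum to 2. + 1. = 3. and
  # 2. + 4. + 5. + 3. = 14., matching the predictions above.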

  def test_linear_model_with_weights(self):

    class _TestColumnWithWeights(_CategoricalColumn):
      """Produces sparse IDs and sparse weights."""

      @property
      def name(self):
        return 'test_column'

      @property
      def _parse_example_spec(self):
        return {
            self.name: parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name): parsing_ops.VarLenFeature(
                dtypes.float32),
        }

      @property
      def _num_buckets(self):
        return 5

      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))

      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return _CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])

    t = _TestColumnWithWeights()
    crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        fc.linear_model({
            t.name: sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[0, 1, 2],
                dense_shape=(2, 2)),
            '{}_weights'.format(t.name): sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=[1., 10., 2.],
                dense_shape=(2, 2)),
            'c': sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=['cA', 'cB', 'cC'],
                dense_shape=(2, 2)),
        }, (crossed,))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    """Tests _LinearModel.

    Uses data from test_get_sparse_tensors_simple.
    """
    a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,))
    b = fc._bucketized_column(a, boundaries=(0, 1))
    crossed = fc._crossed_column([b, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          'a':
              constant_op.constant(((-1., .5), (.5, 1.))),
          'c':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=['cA', 'cB', 'cC'],
                  dense_shape=(2, 2)),
      }, (crossed,))
      bias = get_linear_model_bias()
      crossed_var = get_linear_model_column_var(crossed)
      with _initialized_session() as sess:
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(crossed_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        sess.run(crossed_var.assign(((1.,), (2.,), (3.,), (4.,), (5.,))))
        # Expected ids after cross = (1, 0, 1, 3, 4, 2)
        self.assertAllClose(((3.,), (14.,)), self.evaluate(predictions))
        sess.run(bias.assign((.1,)))
        self.assertAllClose(((3.1,), (14.1,)), self.evaluate(predictions))

  def test_keras_linear_model_with_weights(self):

    class _TestColumnWithWeights(_CategoricalColumn):
      """Produces sparse IDs and sparse weights."""

      @property
      def name(self):
        return 'test_column'

      @property
      def _parse_example_spec(self):
        return {
            self.name:
                parsing_ops.VarLenFeature(dtypes.int32),
            '{}_weights'.format(self.name):
                parsing_ops.VarLenFeature(dtypes.float32),
        }

      @property
      def _num_buckets(self):
        return 5

      def _transform_feature(self, inputs):
        return (inputs.get(self.name),
                inputs.get('{}_weights'.format(self.name)))

      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        """Populates both id_tensor and weight_tensor."""
        ids_and_weights = inputs.get(self)
        return _CategoricalColumn.IdWeightPair(
            id_tensor=ids_and_weights[0], weight_tensor=ids_and_weights[1])

    t = _TestColumnWithWeights()
    crossed = fc._crossed_column([t, 'c'], hash_bucket_size=5, hash_key=5)
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          'crossed_column does not support weight_tensor.*{}'.format(t.name)):
        get_keras_linear_model_predictions({
            t.name:
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[0, 1, 2],
                    dense_shape=(2, 2)),
            '{}_weights'.format(t.name):
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=[1., 10., 2.],
                    dense_shape=(2, 2)),
            'c':
                sparse_tensor.SparseTensor(
                    indices=((0, 0), (1, 0), (1, 1)),
                    values=['cA', 'cB', 'cC'],
                    dense_shape=(2, 2)),
        }, (crossed,))


def get_linear_model_bias(name='linear_model'):
  with variable_scope.variable_scope(name, reuse=True):
    return variable_scope.get_variable('bias_weights')


def get_linear_model_column_var(column, name='linear_model'):
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
                            name + '/' + column.name)[0]


def get_keras_linear_model_predictions(features,
                                       feature_columns,
                                       units=1,
                                       sparse_combiner='sum',
                                       weight_collections=None,
                                       trainable=True,
                                       cols_to_vars=None):
  keras_linear_model = _LinearModel(
      feature_columns,
      units,
      sparse_combiner,
      weight_collections,
      trainable,
      name='linear_model')
  retval = keras_linear_model(features)  # pylint: disable=not-callable
  if cols_to_vars is not None:
    cols_to_vars.update(keras_linear_model.cols_to_vars())
  return retval


class LinearModelTest(test.TestCase):

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc.linear_model(features={}, feature_columns=[])

  def test_should_be_feature_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
      fc.linear_model(features={'a': [[0]]}, feature_columns='NotSupported')

  def test_should_be_dense_or_categorical_column(self):

    class NotSupportedColumn(_FeatureColumn):

      @property
      def name(self):
        return 'NotSupportedColumn'

      def _transform_feature(self, cache):
        pass

      @property
      def _parse_example_spec(self):
        pass

    with self.assertRaisesRegexp(
        ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
      fc.linear_model(
          features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])

  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.linear_model(
          features={'a': [[0]]},
          feature_columns={'a': fc._numeric_column('a')})

  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc.linear_model(
          features={'a': [[0]]},
          feature_columns=[fc._numeric_column('a'),
                           fc._numeric_column('a')])

  def test_dense_bias(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        sess.run(price_var.assign([[10.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[15.], [55.]], self.evaluate(predictions))

  def test_sparse_bias(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(wire_cast_var))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_and_sparse_bias(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))
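
  # Dense and sparse columns simply add their contributions: the first
  # example above scores wire_var[2] + price_var * 1. + bias
  # = 1000. + 10. + 5. = 1015.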

  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""

    class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):

      @property
      def name(self):
        return 'dense_and_sparse_column'

      @property
      def _parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}

      def _transform_feature(self, inputs):
        return inputs.get(self.name)

      @property
      def _variable_shape(self):
        raise ValueError('Should not use this method.')

      def _get_dense_tensor(self, inputs, weight_collections=None,
                            trainable=None):
        raise ValueError('Should not use this method.')

      @property
      def _num_buckets(self):
        return 4

      def _get_sparse_tensors(self, inputs, weight_collections=None,
                              trainable=None):
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return _CategoricalColumn.IdWeightPair(sp_tensor, None)

    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      predictions = fc.linear_model(features, [dense_and_sparse_column])
      bias = get_linear_model_bias()
      dense_and_sparse_column_var = get_linear_model_column_var(
          dense_and_sparse_column)
      with _initialized_session() as sess:
        sess.run(dense_and_sparse_column_var.assign(
            [[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_multi_output(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[10., 100., 1000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
                            self.evaluate(predictions))

  def test_sparse_multi_output(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast], units=3)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
                                  [1000., 1100., 1200.],
                                  [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            self.evaluate(predictions))

  def test_dense_multi_dimension(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price])
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_sparse_multi_rank(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(features, [wire_cast])
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))

  def test_sparse_combiner(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = fc.linear_model(
          features, [wire_cast], sparse_combiner='mean')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))

  def test_sparse_combiner_with_negative_weights(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    wire_cast_weights = fc._weighted_categorical_column(wire_cast, 'weights')

    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {
          'wire_cast': wire_tensor,
          'weights': constant_op.constant([[1., 1., -1.0]])
      }
      predictions = fc.linear_model(
          features, [wire_cast_weights], sparse_combiner='sum')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [-9985.]], self.evaluate(predictions))
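
  # sparse_combiner='mean' (test_sparse_combiner above) averages an example's
  # id weights before the bias: ids [0, 3] give (10. + 10000.) / 2 + 5.
  # = 5010. With a weighted column the combiner sees the signed weights, so
  # the -1. weight on id 3 yields 10. - 10000. + 5. = -9985.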

  def test_dense_multi_dimension_multi_output(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = fc.linear_model(features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            self.evaluate(predictions))

  def test_raises_if_shape_mismatch(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc.linear_model(features, [price])

  def test_dense_reshaping(self):
    price = fc._numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      predictions = fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_dense_multi_column(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3.], [4.]]
      }
      predictions = fc.linear_model(features, [price1, price2])
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
        self.assertAllClose([[0.]], self.evaluate(price2_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))

  def test_fills_cols_to_vars(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      cols_to_vars = {}
      fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      self.assertAllEqual(cols_to_vars['bias'], [bias])
      self.assertAllEqual(cols_to_vars[price1], [price1_var])
      self.assertAllEqual(cols_to_vars[price2], [price2_var])
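
  # cols_to_vars maps each column (plus the 'bias' key) to the list of
  # variables created for it; with a partitioner that list can hold several
  # shards, as the next test shows.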

  def test_fills_cols_to_vars_partitioned_variables(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2', shape=3)
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [6., 7.]],
          'price2': [[3., 4., 5.], [8., 9., 10.]]
      }
      cols_to_vars = {}
      with variable_scope.variable_scope(
          'linear',
          partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
        fc.linear_model(features, [price1, price2], cols_to_vars=cols_to_vars)
      with _initialized_session():
        self.assertEqual([0.], cols_to_vars['bias'][0].eval())
        # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
        self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
        # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
        # a [1, 1] Variable.
        self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())

  def test_fills_cols_to_output_tensors(self):
    # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
    # creates a Variable.
    apple_numeric_column = fc._numeric_column('apple_numeric_column')
    banana_dense_feature = fc._numeric_column('banana_dense_feature')
    banana_dense_feature_bucketized = fc._bucketized_column(
        banana_dense_feature, boundaries=[0.])
    cherry_sparse_column = fc._categorical_column_with_hash_bucket(
        'cherry_sparse_feature', hash_bucket_size=5)
    dragonfruit_embedding_column = fc._embedding_column(
        cherry_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'apple_numeric_column': [[3.], [4.]],
          'banana_dense_feature': [[-1.], [4.]],
          'cherry_sparse_feature': [['a'], ['x']],
      }
      cols_to_output_tensors = {}
      all_cols = [
          apple_numeric_column, banana_dense_feature_bucketized,
          dragonfruit_embedding_column
      ]
      input_layer = fc.input_layer(
          features, all_cols, cols_to_output_tensors=cols_to_output_tensors)

      # We check the mapping by checking that we have the right keys,
      # and that the values (output_tensors) were indeed the ones used to
      # form the input layer.
      self.assertItemsEqual(all_cols, cols_to_output_tensors.keys())
      input_layer_inputs = [tensor for tensor in input_layer.op.inputs[:-1]]
      output_tensors = [tensor for tensor in cols_to_output_tensors.values()]
      self.assertItemsEqual(input_layer_inputs, output_tensors)

  def test_dense_collection(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      self.assertIn(bias, my_vars)
      self.assertIn(price_var, my_vars)

  def test_sparse_collection(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, my_vars)
      self.assertIn(wire_cast_var, my_vars)

  def test_dense_trainable_default(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertIn(bias, trainable_vars)
      self.assertIn(price_var, trainable_vars)

  def test_sparse_trainable_default(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast])
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, trainable_vars)
      self.assertIn(wire_cast_var, trainable_vars)

  def test_dense_trainable_false(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      fc.linear_model(features, [price], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_sparse_trainable_false(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      fc.linear_model(features, [wire_cast], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_column_order(self):
    price_a = fc._numeric_column('price_a')
    price_b = fc._numeric_column('price_b')
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [price_a, wire_cast, price_b],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)

    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      fc.linear_model(
          features, [wire_cast, price_b, price_a],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)

  def test_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):
        fc.linear_model(features, [price1, price2])

  def test_subset_of_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    price3 = fc._numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.linear_model(features, [price1, price2, price3])

  def test_runtime_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})
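
  # With fully static shapes the batch-size mismatch fails at graph
  # construction (test_static_batch_size_mismatch above); with placeholders
  # the check can only fire when the session runs, as in
  # test_runtime_batch_size_mismatch above and the matching case below.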

  def test_runtime_batch_size_matches(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      predictions = fc.linear_model(features, [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    price = fc._numeric_column('price')
    price_buckets = fc._bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': constant_op.constant([-1., 12.,]),
        'body-style': sparse_tensor.SparseTensor(
            indices=((0,), (1,)),
            values=('sedan', 'hardtop'),
            dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])

    net = fc.linear_model(features, [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)

      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    price = fc._numeric_column('price')
    price_buckets = fc._bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc._categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)

    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array(['US', 'CA'])

    net = fc.linear_model(features, [price_buckets, body_style, country])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          sess.run(
                              net,
                              feed_dict={
                                  features['price']: price_data,
                                  features['body-style']: body_style_data,
                                  features['country']: country_data
                              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    price = fc._numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.linear_model(features, [price])

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.linear_model(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})

  def test_multiple_linear_models(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features1 = {'price': [[1.], [5.]]}
      features2 = {'price': [[2.], [10.]]}
      predictions1 = fc.linear_model(features1, [price])
      predictions2 = fc.linear_model(features2, [price])
      bias1 = get_linear_model_bias(name='linear_model')
      bias2 = get_linear_model_bias(name='linear_model_1')
      price_var1 = get_linear_model_column_var(price, name='linear_model')
      price_var2 = get_linear_model_column_var(price, name='linear_model_1')
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias1))
        sess.run(price_var1.assign([[10.]]))
        sess.run(bias1.assign([5.]))
        self.assertAllClose([[15.], [55.]], self.evaluate(predictions1))
        self.assertAllClose([0.], self.evaluate(bias2))
        sess.run(price_var2.assign([[10.]]))
        sess.run(bias2.assign([5.]))
        self.assertAllClose([[25.], [105.]], self.evaluate(predictions2))


class _LinearModelTest(test.TestCase):

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      get_keras_linear_model_predictions(features={}, feature_columns=[])

  def test_should_be_feature_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _FeatureColumn'):
      get_keras_linear_model_predictions(
          features={'a': [[0]]}, feature_columns='NotSupported')

  def test_should_be_dense_or_categorical_column(self):

    class NotSupportedColumn(_FeatureColumn):

      @property
      def name(self):
        return 'NotSupportedColumn'

      def _transform_feature(self, cache):
        pass

      @property
      def _parse_example_spec(self):
        pass

    with self.assertRaisesRegexp(
        ValueError, 'must be either a _DenseColumn or _CategoricalColumn'):
      get_keras_linear_model_predictions(
          features={'a': [[0]]}, feature_columns=[NotSupportedColumn()])

  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.linear_model(
          features={'a': [[0]]},
          feature_columns={'a': fc._numeric_column('a')})

  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      get_keras_linear_model_predictions(
          features={'a': [[0]]},
          feature_columns=[fc._numeric_column('a'),
                           fc._numeric_column('a')])

  def test_dense_bias(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = get_keras_linear_model_predictions(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        sess.run(price_var.assign([[10.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[15.], [55.]], self.evaluate(predictions))

  def test_sparse_bias(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(features, [wire_cast])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(wire_cast_var))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))
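
  # The remaining tests mirror LinearModelTest above, exercising the same
  # behaviors through get_keras_linear_model_predictions (_LinearModel)
  # instead of fc.linear_model.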

  def test_dense_and_sparse_bias(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor, 'price': [[1.], [5.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [wire_cast, price])
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        sess.run(price_var.assign([[10.]]))
        self.assertAllClose([[1015.], [10065.]], self.evaluate(predictions))

  def test_dense_and_sparse_column(self):
    """When the column is both dense and sparse, uses sparse tensors."""

    class _DenseAndSparseColumn(_DenseColumn, _CategoricalColumn):

      @property
      def name(self):
        return 'dense_and_sparse_column'

      @property
      def _parse_example_spec(self):
        return {self.name: parsing_ops.VarLenFeature(self.dtype)}

      def _transform_feature(self, inputs):
        return inputs.get(self.name)

      @property
      def _variable_shape(self):
        raise ValueError('Should not use this method.')

      def _get_dense_tensor(self,
                            inputs,
                            weight_collections=None,
                            trainable=None):
        raise ValueError('Should not use this method.')

      @property
      def _num_buckets(self):
        return 4

      def _get_sparse_tensors(self,
                              inputs,
                              weight_collections=None,
                              trainable=None):
        sp_tensor = sparse_tensor.SparseTensor(
            indices=[[0, 0], [1, 0], [1, 1]],
            values=[2, 0, 3],
            dense_shape=[2, 2])
        return _CategoricalColumn.IdWeightPair(sp_tensor, None)

    dense_and_sparse_column = _DenseAndSparseColumn()
    with ops.Graph().as_default():
      sp_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {dense_and_sparse_column.name: sp_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [dense_and_sparse_column])
      bias = get_linear_model_bias()
      dense_and_sparse_column_var = get_linear_model_column_var(
          dense_and_sparse_column)
      with _initialized_session() as sess:
        sess.run(
            dense_and_sparse_column_var.assign([[10.], [100.], [1000.],
                                                [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [10015.]], self.evaluate(predictions))

  def test_dense_multi_output(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      predictions = get_keras_linear_model_predictions(
          features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((1, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[10., 100., 1000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[15., 106., 1007.], [55., 506., 5007.]],
                            self.evaluate(predictions))
  def test_sparse_multi_output(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [wire_cast], units=3)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((4, 3)), self.evaluate(wire_cast_var))
        sess.run(
            wire_cast_var.assign([[10., 11., 12.], [100., 110., 120.],
                                  [1000., 1100., 1200.],
                                  [10000., 11000., 12000.]]))
        sess.run(bias.assign([5., 6., 7.]))
        self.assertAllClose([[1005., 1106., 1207.], [10015., 11017., 12019.]],
                            self.evaluate(predictions))

  def test_dense_multi_dimension(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = get_keras_linear_model_predictions(features, [price])
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_sparse_multi_rank(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = array_ops.sparse_placeholder(dtypes.string)
      wire_value = sparse_tensor.SparseTensorValue(
          values=['omar', 'stringer', 'marlo', 'omar'],  # hashed = [2, 0, 3, 2]
          indices=[[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 0, 1]],
          dense_shape=[2, 2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(features, [wire_cast])
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((4, 1)), self.evaluate(wire_cast_var))
        self.assertAllClose(
            np.zeros((2, 1)),
            predictions.eval(feed_dict={wire_tensor: wire_value}))
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        self.assertAllClose(
            [[1010.], [11000.]],
            predictions.eval(feed_dict={wire_tensor: wire_value}))

  def test_sparse_combiner(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default():
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar', 'stringer', 'marlo'],  # hashed to = [2, 0, 3]
          indices=[[0, 0], [1, 0], [1, 1]],
          dense_shape=[2, 2])
      features = {'wire_cast': wire_tensor}
      predictions = get_keras_linear_model_predictions(
          features, [wire_cast], sparse_combiner='mean')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      with _initialized_session() as sess:
        sess.run(wire_cast_var.assign([[10.], [100.], [1000.], [10000.]]))
        sess.run(bias.assign([5.]))
        self.assertAllClose([[1005.], [5010.]], self.evaluate(predictions))

  def test_dense_multi_dimension_multi_output(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      predictions = get_keras_linear_model_predictions(
          features, [price], units=3)
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose(np.zeros((3,)), self.evaluate(bias))
        self.assertAllClose(np.zeros((2, 3)), self.evaluate(price_var))
        sess.run(price_var.assign([[1., 2., 3.], [10., 100., 1000.]]))
        sess.run(bias.assign([2., 3., 4.]))
        self.assertAllClose([[23., 205., 2007.], [67., 613., 6019.]],
                            self.evaluate(predictions))

  def test_raises_if_shape_mismatch(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        get_keras_linear_model_predictions(features, [price])
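  # A [1, 2]-shaped numeric column is flattened to 2 weights per example, so
  # the predictions below are 1. * 10. + 2. * 100. = 210. and
  # 5. * 10. + 6. * 100. = 650.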
  def test_dense_reshaping(self):
    price = fc._numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      predictions = get_keras_linear_model_predictions(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price_var.assign([[10.], [100.]]))
        self.assertAllClose([[210.], [650.]], self.evaluate(predictions))

  def test_dense_multi_column(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      predictions = get_keras_linear_model_predictions(features,
                                                       [price1, price2])
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      with _initialized_session() as sess:
        self.assertAllClose([0.], self.evaluate(bias))
        self.assertAllClose([[0.], [0.]], self.evaluate(price1_var))
        self.assertAllClose([[0.]], self.evaluate(price2_var))
        self.assertAllClose([[0.], [0.]], self.evaluate(predictions))
        sess.run(price1_var.assign([[10.], [100.]]))
        sess.run(price2_var.assign([[1000.]]))
        sess.run(bias.assign([7.]))
        self.assertAllClose([[3217.], [4657.]], self.evaluate(predictions))

  def test_fills_cols_to_vars(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      cols_to_vars = {}
      get_keras_linear_model_predictions(
          features, [price1, price2], cols_to_vars=cols_to_vars)
      bias = get_linear_model_bias()
      price1_var = get_linear_model_column_var(price1)
      price2_var = get_linear_model_column_var(price2)
      self.assertAllEqual(cols_to_vars['bias'], [bias])
      self.assertAllEqual(cols_to_vars[price1], [price1_var])
      self.assertAllEqual(cols_to_vars[price2], [price2_var])

  def test_fills_cols_to_vars_partitioned_variables(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2', shape=3)
    with ops.Graph().as_default():
      features = {
          'price1': [[1., 2.], [6., 7.]],
          'price2': [[3., 4., 5.], [8., 9., 10.]]
      }
      cols_to_vars = {}
      with variable_scope.variable_scope(
          'linear',
          partitioner=partitioned_variables.fixed_size_partitioner(2, axis=0)):
        get_keras_linear_model_predictions(
            features, [price1, price2], cols_to_vars=cols_to_vars)
      with _initialized_session():
        self.assertEqual([0.], cols_to_vars['bias'][0].eval())
        # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
        self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
        # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
        # a [1, 1] Variable.
        self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
        self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
  def test_dense_collection(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      get_keras_linear_model_predictions(
          features, [price], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      self.assertIn(bias, my_vars)
      self.assertIn(price_var, my_vars)

  def test_sparse_collection(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      get_keras_linear_model_predictions(
          features, [wire_cast], weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, my_vars)
      self.assertIn(wire_cast_var, my_vars)

  def test_dense_trainable_default(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      get_keras_linear_model_predictions(features, [price])
      bias = get_linear_model_bias()
      price_var = get_linear_model_column_var(price)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertIn(bias, trainable_vars)
      self.assertIn(price_var, trainable_vars)

  def test_sparse_trainable_default(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      get_keras_linear_model_predictions(features, [wire_cast])
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      bias = get_linear_model_bias()
      wire_cast_var = get_linear_model_column_var(wire_cast)
      self.assertIn(bias, trainable_vars)
      self.assertIn(wire_cast_var, trainable_vars)

  def test_dense_trainable_false(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default() as g:
      features = {'price': [[1.], [5.]]}
      get_keras_linear_model_predictions(features, [price], trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_sparse_trainable_false(self):
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      wire_tensor = sparse_tensor.SparseTensor(
          values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      features = {'wire_cast': wire_tensor}
      get_keras_linear_model_predictions(features, [wire_cast],
                                         trainable=False)
      trainable_vars = g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      self.assertEqual([], trainable_vars)

  def test_column_order(self):
    price_a = fc._numeric_column('price_a')
    price_b = fc._numeric_column('price_b')
    wire_cast = fc._categorical_column_with_hash_bucket('wire_cast', 4)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      get_keras_linear_model_predictions(
          features, [price_a, wire_cast, price_b],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)
    with ops.Graph().as_default() as g:
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
          'wire_cast':
              sparse_tensor.SparseTensor(
                  values=['omar'], indices=[[0, 0]], dense_shape=[1, 1])
      }
      get_keras_linear_model_predictions(
          features, [wire_cast, price_b, price_a],
          weight_collections=['my-vars'])
      my_vars = g.get_collection('my-vars')
      self.assertIn('price_a', my_vars[0].name)
      self.assertIn('price_b', my_vars[1].name)
      self.assertIn('wire_cast', my_vars[2].name)

  def test_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        get_keras_linear_model_predictions(features, [price1, price2])

  def test_subset_of_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    price3 = fc._numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        get_keras_linear_model_predictions(features, [price1, price2, price3])

  def test_runtime_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      predictions = get_keras_linear_model_predictions(features,
                                                       [price1, price2])
      with _initialized_session() as sess:
        with self.assertRaisesRegexp(errors.OpError,
                                     'must have the same size and shape'):
          sess.run(
              predictions, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      predictions = get_keras_linear_model_predictions(features,
                                                       [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            predictions,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })
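  # 1-D inputs are also accepted: the tests below feed rank-1 dense and
  # sparse tensors, each treated as a batch of scalar features.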
  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    price = fc._numeric_column('price')
    price_buckets = fc._bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                -1.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])

    net = get_keras_linear_model_predictions(features,
                                             [price_buckets, body_style])
    with _initialized_session() as sess:
      bias = get_linear_model_bias()
      price_buckets_var = get_linear_model_column_var(price_buckets)
      body_style_var = get_linear_model_column_var(body_style)

      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          self.evaluate(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    price = fc._numeric_column('price')
    price_buckets = fc._bucketized_column(
        price, boundaries=[
            0.,
            10.,
            100.,
        ])
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    country = fc._categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)

    price_data = np.array([-1., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array(['US', 'CA'])

    net = get_keras_linear_model_predictions(
        features, [price_buckets, body_style, country])
    bias = get_linear_model_bias()
    price_buckets_var = get_linear_model_column_var(price_buckets)
    body_style_var = get_linear_model_column_var(body_style)
    with _initialized_session() as sess:
      sess.run(price_buckets_var.assign([[10.], [100.], [1000.], [10000.]]))
      sess.run(body_style_var.assign([[-10.], [-100.], [-1000.]]))
      sess.run(bias.assign([5.]))

      self.assertAllClose([[10 - 1000 + 5.], [1000 - 10 + 5.]],
                          sess.run(
                              net,
                              feed_dict={
                                  features['price']: price_data,
                                  features['body-style']: body_style_data,
                                  features['country']: country_data
                              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    price = fc._numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      get_keras_linear_model_predictions(features, [price])

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = get_keras_linear_model_predictions(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})


class InputLayerTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def test_retrieving_input(self):
    features = {'a': [0.]}
    input_layer = InputLayer(fc._numeric_column('a'))
    inputs = self.evaluate(input_layer(features))
    self.assertAllClose([[0.]], inputs)
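  # InputLayer is a layer object, so calling it twice on the same features
  # must reuse the embedding variable created on the first call.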
  def test_reuses_variables(self):
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc._categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc._embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = InputLayer([embedding_column])
      features = {'a': sparse_input}

      inputs = input_layer(features)
      variables = input_layer.variables

      # Sanity check: test that the inputs are correct.
      self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

      # Check that only one variable was created.
      self.assertEqual(1, len(variables))

      # Check that invoking input_layer on the same features does not create
      # additional variables.
      _ = input_layer(features)
      self.assertEqual(1, len(variables))
      self.assertEqual(variables[0], input_layer.variables[0])

  def test_feature_column_input_layer_gradient(self):
    with context.eager_mode():
      sparse_input = sparse_tensor.SparseTensor(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 2),
          dense_shape=(3, 3))

      # Create feature columns (categorical and embedding).
      categorical_column = fc._categorical_column_with_identity(
          key='a', num_buckets=3)
      embedding_dimension = 2

      def _embedding_column_initializer(shape, dtype, partition_info):
        del shape  # unused
        del dtype  # unused
        del partition_info  # unused
        embedding_values = (
            (1, 0),  # id 0
            (0, 1),  # id 1
            (1, 1))  # id 2
        return embedding_values

      embedding_column = fc._embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_embedding_column_initializer)

      input_layer = InputLayer([embedding_column])
      features = {'a': sparse_input}

      def scale_matrix():
        matrix = input_layer(features)
        return 2 * matrix

      # Sanity check: Verify that scale_matrix returns the correct output.
      self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

      # Check that the returned gradient is correct.
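      # scale_matrix is 2 * embedding_lookup, so the gradient w.r.t. the
      # embedding variable is 2 for every looked-up row (ids 0, 1 and 2).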
      grad_function = backprop.implicit_grad(scale_matrix)
      grads_and_vars = grad_function()
      indexed_slice = grads_and_vars[0][0]
      gradient = grads_and_vars[0][0].values

      self.assertAllEqual([0, 1, 2], indexed_slice.indices)
      self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)


class FunctionalInputLayerTest(test.TestCase):

  def test_raises_if_empty_feature_columns(self):
    with self.assertRaisesRegexp(ValueError,
                                 'feature_columns must not be empty'):
      fc.input_layer(features={}, feature_columns=[])

  def test_should_be_dense_column(self):
    with self.assertRaisesRegexp(ValueError, 'must be a _DenseColumn'):
      fc.input_layer(
          features={'a': [[0]]},
          feature_columns=[
              fc._categorical_column_with_hash_bucket('wire_cast', 4)
          ])

  def test_does_not_support_dict_columns(self):
    with self.assertRaisesRegexp(
        ValueError, 'Expected feature_columns to be iterable, found dict.'):
      fc.input_layer(
          features={'a': [[0]]},
          feature_columns={'a': fc._numeric_column('a')})

  def test_bare_column(self):
    with ops.Graph().as_default():
      features = {'a': [0.]}
      net = fc.input_layer(features, fc._numeric_column('a'))
      with _initialized_session():
        self.assertAllClose([[0.]], self.evaluate(net))

  def test_column_generator(self):
    with ops.Graph().as_default():
      features = {'a': [0.], 'b': [1.]}
      columns = (fc._numeric_column(key) for key in features)
      net = fc.input_layer(features, columns)
      with _initialized_session():
        self.assertAllClose([[0., 1.]], self.evaluate(net))

  def test_raises_if_duplicate_name(self):
    with self.assertRaisesRegexp(
        ValueError, 'Duplicate feature column name found for columns'):
      fc.input_layer(
          features={'a': [[0]]},
          feature_columns=[fc._numeric_column('a'),
                           fc._numeric_column('a')])

  def test_one_column(self):
    price = fc._numeric_column('price')
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      net = fc.input_layer(features, [price])
      with _initialized_session():
        self.assertAllClose([[1.], [5.]], self.evaluate(net))

  def test_multi_dimension(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1., 2.], [5., 6.]]}
      net = fc.input_layer(features, [price])
      with _initialized_session():
        self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_raises_if_shape_mismatch(self):
    price = fc._numeric_column('price', shape=2)
    with ops.Graph().as_default():
      features = {'price': [[1.], [5.]]}
      with self.assertRaisesRegexp(
          Exception,
          r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
        fc.input_layer(features, [price])

  def test_reshaping(self):
    price = fc._numeric_column('price', shape=[1, 2])
    with ops.Graph().as_default():
      features = {'price': [[[1., 2.]], [[5., 6.]]]}
      net = fc.input_layer(features, [price])
      with _initialized_session():
        self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))

  def test_multi_column(self):
    price1 = fc._numeric_column('price1', shape=2)
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      net = fc.input_layer(features, [price1, price2])
      with _initialized_session():
        self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))

  def test_fills_cols_to_vars(self):
    # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
    # _BucketizedColumn, and an _EmbeddingColumn.  Only the _EmbeddingColumn
    # creates a Variable.
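    # cols_to_vars maps every column to the (possibly empty) list of
    # variables created on its behalf; only the embedding entry is non-empty.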
    price1 = fc._numeric_column('price1')
    dense_feature = fc._numeric_column('dense_feature')
    dense_feature_bucketized = fc._bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc._categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc._embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertIsInstance(cols_to_vars[some_embedding_column][0],
                            variables_lib.Variable)
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape,
                          [5, 10])

  @test_util.run_deprecated_v1
  def test_fills_cols_to_vars_shared_embedding(self):
    # Provide 5 DenseColumn's to input_layer: a NumericColumn, a
    # BucketizedColumn, an EmbeddingColumn, and two SharedEmbeddingColumns.
    # The EmbeddingColumn creates a Variable and the two
    # SharedEmbeddingColumns share one variable.
    price1 = fc._numeric_column('price1')
    dense_feature = fc._numeric_column('dense_feature')
    dense_feature_bucketized = fc._bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc._categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc._embedding_column(
        some_sparse_column, dimension=10)
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    shared_embedding_a, shared_embedding_b = fc_new.shared_embedding_columns(
        [categorical_column_a, categorical_column_b], dimension=2)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      cols_to_vars = {}
      all_cols = [
          price1, dense_feature_bucketized, some_embedding_column,
          shared_embedding_a, shared_embedding_b
      ]
      fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
      self.assertEqual(1, len(cols_to_vars[shared_embedding_a]))
      # This is a bug in the current implementation and should be fixed in the
      # new one.
      self.assertEqual(0, len(cols_to_vars[shared_embedding_b]))
      self.assertIsInstance(cols_to_vars[some_embedding_column][0],
                            variables_lib.Variable)
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape,
                          [5, 10])
      self.assertIsInstance(cols_to_vars[shared_embedding_a][0],
                            variables_lib.Variable)
      self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2])

  def test_fills_cols_to_vars_partitioned_variables(self):
    price1 = fc._numeric_column('price1')
    dense_feature = fc._numeric_column('dense_feature')
    dense_feature_bucketized = fc._bucketized_column(
        dense_feature, boundaries=[0.])
    some_sparse_column = fc._categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc._embedding_column(
        some_sparse_column, dimension=10)
    with ops.Graph().as_default():
      features = {
          'price1': [[3.], [4.]],
          'dense_feature': [[-1.], [4.]],
          'sparse_feature': [['a'], ['x']],
      }
      cols_to_vars = {}
      all_cols = [price1, dense_feature_bucketized, some_embedding_column]
      with variable_scope.variable_scope(
          'input_from_feature_columns',
          partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
        fc.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
      self.assertItemsEqual(list(cols_to_vars.keys()), all_cols)
      self.assertEqual(0, len(cols_to_vars[price1]))
      self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
      self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
      self.assertEqual(
          'input_from_feature_columns/input_layer/sparse_feature_embedding/'
          'embedding_weights/part_0:0',
          cols_to_vars[some_embedding_column][0].name)
      self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape,
                          [2, 10])
      self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape,
                          [2, 10])
      self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape,
                          [1, 10])

  def test_column_order(self):
    price_a = fc._numeric_column('price_a')
    price_b = fc._numeric_column('price_b')
    with ops.Graph().as_default():
      features = {
          'price_a': [[1.]],
          'price_b': [[3.]],
      }
      net1 = fc.input_layer(features, [price_a, price_b])
      net2 = fc.input_layer(features, [price_b, price_a])
      with _initialized_session():
        self.assertAllClose([[1., 3.]], self.evaluate(net1))
        self.assertAllClose([[1., 3.]], self.evaluate(net2))

  def test_fails_for_categorical_column(self):
    animal = fc._categorical_column_with_identity('animal', num_buckets=4)
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      with self.assertRaisesRegexp(Exception, 'must be a _DenseColumn'):
        fc.input_layer(features, [animal])

  def test_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.input_layer(features, [price1, price2])

  def test_subset_of_static_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    price3 = fc._numeric_column('price3')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegexp(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        fc.input_layer(features, [price1, price2, price3])
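  # When the mismatch involves a placeholder, it can only be detected at
  # session.run time, so the next test expects an OpError rather than a
  # ValueError at graph construction.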
  def test_runtime_batch_size_mismatch(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = fc.input_layer(features, [price1, price2])
      with _initialized_session() as sess:
        with self.assertRaisesRegexp(errors.OpError,
                                     'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})

  def test_runtime_batch_size_matches(self):
    price1 = fc._numeric_column('price1')
    price2 = fc._numeric_column('price2')
    with ops.Graph().as_default():
      features = {
          'price1': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
          'price2': array_ops.placeholder(dtype=dtypes.int64),  # batchsize = 2
      }
      net = fc.input_layer(features, [price1, price2])
      with _initialized_session() as sess:
        sess.run(
            net,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })

  def test_multiple_layers_with_same_embedding_column(self):
    some_sparse_column = fc._categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = fc._embedding_column(
        some_sparse_column, dimension=10)

    with ops.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      fc.input_layer(features, all_cols)
      fc.input_layer(features, all_cols)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(2, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'input_layer/sparse_feature_embedding/embedding_weights:0',
          'input_layer_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc_new.shared_embedding_columns(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)

    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      fc.input_layer(features, all_cols)
      fc.input_layer(features, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc_new.shared_embedding_columns(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    all_cols = [embedding_column_a, embedding_column_b]

    with ops.Graph().as_default():
      features = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      fc.input_layer(features, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))

    with ops.Graph().as_default():
      features1 = {
          'aaa':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              sparse_tensor.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }

      fc.input_layer(features1, all_cols)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(1, len(
          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
      self.assertItemsEqual(
          ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'],
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])

  @test_util.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in input_layer
    price = fc._numeric_column('price')

    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc._indicator_column(body_style)

    # embedded_country has 5 dims in input_layer.
    country = fc._categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc._embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            constant_op.constant([
                11.,
                12.,
            ]),
        'body-style':
            sparse_tensor.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country': constant_op.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
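      # Row 0: body-style 'sedan' -> one-hot [0., 0., 1.], country 'CA' ->
      # embedding id 2 -> (11., ..., 15.), price 11.; row 1 is analogous.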
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))

  @test_util.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in input_layer
    price = fc._numeric_column('price')

    # one_hot_body_style has 3 dims in input_layer.
    body_style = fc._categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = fc._indicator_column(body_style)

    # embedded_country has 2 dims in input_layer.
    country = fc._categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = fc._embedding_column(
        country, dimension=2, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': array_ops.placeholder(dtypes.float32),
        'body-style': array_ops.sparse_placeholder(dtypes.string),
        # This is dense tensor for the categorical_column.
        'country': array_ops.placeholder(dtypes.string),
    }
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)

    price_data = np.array([11., 12.])
    body_style_data = sparse_tensor.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])

    net = fc.input_layer(features,
                         [price, one_hot_body_style, embedded_country])
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:

      # Each row is formed by concatenating `one_hot_body_style`,
      # `embedded_country`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))

  @test_util.run_deprecated_v1
  def test_with_rank_0_feature(self):
    # price has 1 dimension in input_layer
    price = fc._numeric_column('price')
    features = {
        'price': constant_op.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)

    # Static rank 0 should fail
    with self.assertRaisesRegexp(ValueError, 'Feature .* cannot have rank 0'):
      fc.input_layer(features, [price])

    # Dynamic rank 0 should fail
    features = {
        'price': array_ops.placeholder(dtypes.float32),
    }
    net = fc.input_layer(features, [price])
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})


class MakeParseExampleSpecTest(test.TestCase):

  class _TestFeatureColumn(
      _FeatureColumn,
      collections.namedtuple('_TestFeatureColumn', ['parse_spec'])):

    @property
    def _parse_example_spec(self):
      return self.parse_spec

  def test_no_feature_columns(self):
    actual = fc.make_parse_example_spec([])
    self.assertDictEqual({}, actual)

  def test_invalid_type(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    with self.assertRaisesRegexp(
        ValueError,
        'All feature_columns must be _FeatureColumn instances.*invalid_column'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}), 'invalid_column'))
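  # make_parse_example_spec merges the per-column parsing specs into a single
  # dict; identical keys are allowed only when their specs are identical.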
  def test_one_feature_column(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_two_feature_columns(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2})))
    self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)

  def test_equal_keys_different_parse_spec(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    with self.assertRaisesRegexp(
        ValueError,
        'feature_columns contain different parse_spec for key key1'):
      fc.make_parse_example_spec(
          (self._TestFeatureColumn({key1: parse_spec1}),
           self._TestFeatureColumn({key1: parse_spec2})))

  def test_equal_keys_equal_parse_spec(self):
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key1: parse_spec1})))
    self.assertDictEqual({key1: parse_spec1}, actual)

  def test_multiple_features_dict(self):
    """parse_spec for one column is a dict with length > 1."""
    key1 = 'key1'
    parse_spec1 = parsing_ops.FixedLenFeature(
        shape=(2,), dtype=dtypes.float32, default_value=0.)
    key2 = 'key2'
    parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
    key3 = 'key3'
    parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
    actual = fc.make_parse_example_spec(
        (self._TestFeatureColumn({key1: parse_spec1}),
         self._TestFeatureColumn({key2: parse_spec2, key3: parse_spec3})))
    self.assertDictEqual(
        {key1: parse_spec1, key2: parse_spec2, key3: parse_spec3}, actual)


def _assert_sparse_tensor_value(test_case, expected, actual):
  test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
  test_case.assertAllEqual(expected.indices, actual.indices)

  test_case.assertEqual(
      np.array(expected.values).dtype, np.array(actual.values).dtype)
  test_case.assertAllEqual(expected.values, actual.values)

  test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
  test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)


class VocabularyFileCategoricalColumnTest(test.TestCase):

  def setUp(self):
    super(VocabularyFileCategoricalColumnTest, self).setUp()

    # Contains ints, Golden State Warriors jersey numbers: 30, 35, 11, 23, 22
    self._warriors_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/warriors_vocabulary.txt')
    self._warriors_vocabulary_size = 5

    # Contains strings, character names from 'The Wire': omar, stringer, marlo
    self._wire_vocabulary_file_name = test.test_src_dir_path(
        'python/feature_column/testdata/wire_vocabulary.txt')
    self._wire_vocabulary_size = 3

  @test_util.run_deprecated_v1
  def test_defaults(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual('aaa', column.key)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column._parse_example_spec)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc._categorical_column_with_vocabulary_file(
          key=('aaa',), vocabulary_file='path_to_file', vocabulary_size=3)
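  # num_oov_buckets extends the id space past the file: _num_buckets below is
  # vocabulary_size (3) + num_oov_buckets (4) = 7.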
  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    self.assertEqual(7, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3,
        num_oov_buckets=4, dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(7, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int32)
      }, column._parse_example_spec)

  def test_vocabulary_file_none(self):
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=None, vocabulary_size=3)

  def test_vocabulary_file_empty_string(self):
    with self.assertRaisesRegexp(ValueError, 'Missing vocabulary_file'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='', vocabulary_size=3)

  @test_util.run_deprecated_v1
  def test_invalid_vocabulary_file(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='file_does_not_exist', vocabulary_size=10)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'file_does_not_exist'):
      with self.cached_session():
        lookup_ops.tables_initializer().run()

  def test_invalid_vocabulary_size(self):
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=-1)
    with self.assertRaisesRegexp(ValueError, 'Invalid vocabulary_size'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=0)

  @test_util.run_deprecated_v1
  def test_too_large_vocabulary_size(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size + 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    with self.assertRaisesRegexp(errors.OpError, 'Invalid vocab_size'):
      with self.cached_session():
        lookup_ops.tables_initializer().run()

  def test_invalid_num_oov_buckets(self):
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          num_oov_buckets=-1)

  def test_invalid_dtype(self):
    with self.assertRaisesRegexp(ValueError,
                                 'dtype must be string or integer'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa', vocabulary_file='path', vocabulary_size=3,
          dtype=dtypes.float64)

  def test_invalid_buckets_and_default_value(self):
    with self.assertRaisesRegexp(
        ValueError, 'both num_oov_buckets and default_value'):
      fc._categorical_column_with_vocabulary_file(
          key='aaa',
          vocabulary_file=self._wire_vocabulary_file_name,
          vocabulary_size=self._wire_vocabulary_size,
          num_oov_buckets=100,
          default_value=2)
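  # The column's dtype and the dtype of the fed tensor must agree; the next
  # two tests feed ints into a string column and strings into an int column.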
  def test_invalid_input_dtype_int32(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        dtype=dtypes.string)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  def test_invalid_input_dtype_string(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file='path_to_file', vocabulary_size=3)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_none_vocabulary_size(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa', vocabulary_file=self._wire_vocabulary_file_name)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
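      # The wire vocabulary is [omar, stringer, marlo], so 'marlo' maps to id
      # 2, 'omar' to id 0, and out-of-vocabulary 'skywalker' to -1.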
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          self.evaluate(id_tensor))

  def test_get_sparse_tensors_weight_collections(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': inputs
        }), weight_collections=('my_weights',))

    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': (('marlo', ''), ('skywalker', 'omar'))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_oov_buckets(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_small_vocabulary_size(self):
    # 'marlo' is the last entry in our vocabulary file, so by setting
    # `vocabulary_size` to 1 less than the number of entries in the file, we
    # take 'marlo' out of the vocabulary.
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size - 1)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((-1, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_dense_input(self):
    default_value = -100
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa': ((11, -1, -1), (100, 30, -1), (-1, -1, 22))
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1), (2, 2)),
              values=np.array((2, default_value, 0, 4), dtype=np.int64),
              dense_shape=(3, 3)),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    column = fc._categorical_column_with_vocabulary_file(
        key='aaa',
        vocabulary_file=self._warriors_vocabulary_file_name,
        vocabulary_size=self._warriors_vocabulary_size,
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    wire_column = fc._categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    wire_column = fc._categorical_column_with_vocabulary_file(
        key='wire',
        vocabulary_file=self._wire_vocabulary_file_name,
        vocabulary_size=self._wire_vocabulary_size,
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))


class VocabularyListCategoricalColumnTest(test.TestCase):

  def test_defaults_string(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.string)
    }, column._parse_example_spec)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc._categorical_column_with_vocabulary_list(
          key=('aaa',), vocabulary_list=('omar', 'stringer', 'marlo'))

  def test_defaults_int(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36))
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32,
        default_value=-99)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int32)
    }, column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.int32)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int32)
      }, column._parse_example_spec)

  def test_invalid_dtype(self):
    with self.assertRaisesRegexp(ValueError,
                                 'dtype must be string or integer'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.float32)
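  # The vocabulary itself is validated as well: it must be a non-empty list
  # of unique strings or integers, as the following tests check.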
        ValueError, r'vocabulary dtype must be string or integer'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12., 24., 36.))

  def test_mismatched_int_dtype(self):
    with self.assertRaisesRegexp(
        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=('omar', 'stringer', 'marlo'),
          dtype=dtypes.int32)

  def test_mismatched_string_dtype(self):
    with self.assertRaisesRegexp(
        ValueError, r'dtype.*and vocabulary dtype.*do not match'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36), dtype=dtypes.string)

  def test_none_mapping(self):
    with self.assertRaisesRegexp(
        ValueError, r'vocabulary_list.*must be non-empty'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=None)

  def test_empty_mapping(self):
    with self.assertRaisesRegexp(
        ValueError, r'vocabulary_list.*must be non-empty'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=tuple([]))

  def test_duplicate_mapping(self):
    with self.assertRaisesRegexp(ValueError, 'Duplicate keys'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 12))

  def test_invalid_num_oov_buckets(self):
    with self.assertRaisesRegexp(ValueError, 'Invalid num_oov_buckets'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa', vocabulary_list=(12, 24, 36), num_oov_buckets=-1)

  def test_invalid_buckets_and_default_value(self):
    with self.assertRaisesRegexp(
        ValueError, 'both num_oov_buckets and default_value'):
      fc._categorical_column_with_vocabulary_list(
          key='aaa',
          vocabulary_list=(12, 24, 36),
          num_oov_buckets=100,
          default_value=2)

  def test_invalid_input_dtype_int32(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(12, 24, 36),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  def test_invalid_input_dtype_string(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(12, 24, 36))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'dtype must be compatible'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  @test_util.run_deprecated_v1
  def test_parse_example_string(self):
    a = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_parse_example_int(self):
    a = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=(11, 21, 31))
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=[11, 21],
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          self.evaluate(id_tensor))

  def test_get_sparse_tensors_weight_collections(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    inputs = sparse_tensor.SparseTensor(
        values=['omar', 'stringer', 'marlo'],
        indices=[[0, 0], [1, 0], [1, 1]],
        dense_shape=[2, 2])
    column._get_sparse_tensors(
        _LazyBuilder({'aaa': inputs}), weight_collections=('my_weights',))
    self.assertItemsEqual([],
                          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({'aaa': (('marlo', ''), ('skywalker', 'omar'))}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((2, -1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_default_value_in_vocabulary(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        default_value=2)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('marlo', 'skywalker', 'omar'),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 2, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_oov_buckets(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 2)),
        values=('marlo', 'skywalker', 'omar', 'heisenberg'),
        dense_shape=(2, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 33, 0, 62), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=np.array((11, 100, 30, 22), dtype=np.int32),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, -1, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_dense_input(self):
    default_value = -100
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        default_value=default_value)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({
            'aaa':
                np.array(
                    ((11, -1, -1), (100, 30, -1), (-1, -1, 22)),
                    dtype=np.int32)
        }))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1), (2, 2)),
              values=np.array((2, default_value, 0, 4), dtype=np.int64),
              dense_shape=(3, 3)),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_int32_with_oov_buckets(self):
    column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=np.array((30, 35, 11, 23, 22), dtype=np.int32),
        dtype=dtypes.int32,
        num_oov_buckets=100)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (2, 2)),
        values=(11, 100, 30, 22),
        dense_shape=(3, 3))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((2, 60, 0, 4), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    wire_column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    wire_column = fc._categorical_column_with_vocabulary_list(
        key='aaa',
        vocabulary_list=('omar', 'stringer', 'marlo'),
        num_oov_buckets=1)
    self.assertEqual(4, wire_column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          wire_column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=('marlo', 'skywalker', 'omar'),
                  dense_shape=(2, 2))
      }, (wire_column,))
      bias = get_linear_model_bias()
      wire_var = get_linear_model_column_var(wire_column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,), (0.,)),
                            self.evaluate(wire_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        wire_var.assign(((1.,), (2.,), (3.,), (4.,))).eval()
        # 'marlo' -> 2: wire_var[2] = 3
        # 'skywalker' -> 3, 'omar' -> 0: wire_var[3] + wire_var[0] = 4+1 = 5
        self.assertAllClose(((3.,), (5.,)), self.evaluate(predictions))


class IdentityCategoricalColumnTest(test.TestCase):

  def test_constructor(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual('aaa', column.name)
    self.assertEqual('aaa', column.key)
    self.assertEqual('aaa', column._var_scope_name)
    self.assertEqual(3, column._num_buckets)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, column._parse_example_spec)

  def test_key_should_be_string(self):
    with self.assertRaisesRegexp(ValueError, 'key must be a string.'):
      fc._categorical_column_with_identity(key=('aaa',), num_buckets=3)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    original = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    for column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', column.name)
      self.assertEqual(3, column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, column._parse_example_spec)

  def test_invalid_num_buckets_zero(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets 0 < 1'):
      fc._categorical_column_with_identity(key='aaa', num_buckets=0)

  def test_invalid_num_buckets_negative(self):
    with self.assertRaisesRegexp(ValueError, 'num_buckets -1 < 1'):
      fc._categorical_column_with_identity(key='aaa', num_buckets=-1)

  def test_invalid_default_value_too_small(self):
    with self.assertRaisesRegexp(ValueError, 'default_value -1 not in range'):
      fc._categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=-1)

  def test_invalid_default_value_too_big(self):
    with self.assertRaisesRegexp(ValueError, 'default_value 3 not in range'):
      fc._categorical_column_with_identity(
          key='aaa', num_buckets=3, default_value=3)

  def test_invalid_input_dtype(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=('omar', 'stringer', 'marlo'),
        dense_shape=(2, 2))
    with self.assertRaisesRegexp(ValueError, 'Invalid input, not integer'):
      column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc._categorical_column_with_identity(key='aaa', num_buckets=30)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[11, 21]))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([11, 21], dtype=np.int64),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    id_tensor = _transform_features({'aaa': inputs}, [column])[column]
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          self.evaluate(id_tensor))

  def test_get_sparse_tensors_weight_collections(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(0, 1, 0),
        dense_shape=(2, 2))
    column._get_sparse_tensors(
        _LazyBuilder({'aaa': inputs}), weight_collections=('my_weights',))
    self.assertItemsEqual([],
                          ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
    self.assertItemsEqual([], ops.get_collection('my_weights'))

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_dense_input(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    id_weight_pair = column._get_sparse_tensors(
        _LazyBuilder({'aaa': ((0, -1), (1, 0))}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=((0, 0), (1, 0), (1, 1)),
              values=np.array((0, 1, 0), dtype=np.int64),
              dense_shape=(2, 2)),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_small(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(errors.OpError,
                                   'assert_greater_or_equal_0'):
        id_weight_pair.id_tensor.eval()

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_inputs_too_big(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, 99, 0),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      with self.assertRaisesRegexp(errors.OpError,
                                   'assert_less_than_num_buckets'):
        id_weight_pair.id_tensor.eval()
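
  # The two tests above cover the failure mode: without a default_value, an
  # identity column validates ids into [0, num_buckets) at session run time
  # (e.g. id 99 with num_buckets=3 fails the assert_less_than_num_buckets
  # check). The tests below exercise the recovery path, where default_value
  # silently replaces any out-of-range id.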

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value(self):
    column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    inputs = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1)),
        values=(1, -1, 99),
        dense_shape=(2, 2))
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=inputs.indices,
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=inputs.dense_shape),
          id_weight_pair.id_tensor.eval())

  @test_util.run_deprecated_v1
  def test_get_sparse_tensors_with_default_value_and_placeholder_inputs(self):
    column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=4, default_value=3)
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int32)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    inputs = sparse_tensor.SparseTensorValue(
        indices=input_indices, values=input_values, dense_shape=input_shape)
    id_weight_pair = column._get_sparse_tensors(_LazyBuilder({'aaa': inputs}))
    self.assertIsNone(id_weight_pair.weight_tensor)
    with _initialized_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=np.array(((0, 0), (1, 0), (1, 1)), dtype=np.int64),
              values=np.array((1, 3, 3), dtype=np.int64),
              dense_shape=np.array((2, 2), dtype=np.int64)),
          id_weight_pair.id_tensor.eval(
              feed_dict={
                  input_indices: ((0, 0), (1, 0), (1, 1)),
                  input_values: (1, -1, 99),
                  input_shape: (2, 2),
              }))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column._num_buckets)
    with ops.Graph().as_default():
      predictions = fc.linear_model({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] = 1
        # weight_var[2] + weight_var[1] = 3+2 = 5
        self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    column = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    self.assertEqual(3, column._num_buckets)
    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          column.name:
              sparse_tensor.SparseTensorValue(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 2, 1),
                  dense_shape=(2, 2))
      }, (column,))
      bias = get_linear_model_bias()
      weight_var = get_linear_model_column_var(column)
      with _initialized_session():
        self.assertAllClose((0.,), self.evaluate(bias))
        self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var))
        self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions))
        weight_var.assign(((1.,), (2.,), (3.,))).eval()
        # weight_var[0] = 1
        # weight_var[2] + weight_var[1] = 3+2 = 5
        self.assertAllClose(((1.,), (5.,)), self.evaluate(predictions))


class TransformFeaturesTest(test.TestCase):

  # All transform tests are distributed across the column tests.
  # Here we only test the multi-column case and naming.
  def test_multi_column(self):
    bucketized_price = fc._bucketized_column(
        fc._numeric_column('price'), boundaries=[0, 2, 4, 6])
    hashed_sparse = fc._categorical_column_with_hash_bucket('wire', 10)
    with ops.Graph().as_default():
      features = {
          'price': [[-1.], [5.]],
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [1, 1]],
                  dense_shape=[2, 2])
      }
      transformed = _transform_features(features,
                                        [bucketized_price, hashed_sparse])
      with _initialized_session():
        self.assertIn(bucketized_price.name,
                      transformed[bucketized_price].name)
        self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
        self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
        self.assertAllEqual([6, 4, 1],
                            transformed[hashed_sparse].values.eval())

  def test_column_order(self):
    """Tests that columns are transformed in sorted order, not input order."""

    class _LoggerColumn(_FeatureColumn):

      def __init__(self, name):
        self._name = name

      @property
      def name(self):
        return self._name

      def _transform_feature(self, inputs):
        del inputs
        self.call_order = call_logger['count']
        call_logger['count'] += 1
        return 'Anything'

      @property
      def _parse_example_spec(self):
        pass

    with ops.Graph().as_default():
      column1 = _LoggerColumn('1')
      column2 = _LoggerColumn('2')
      call_logger = {'count': 0}
      _transform_features({}, [column1, column2])
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)
      call_logger = {'count': 0}
      _transform_features({}, [column2, column1])
      self.assertEqual(0, column1.call_order)
      self.assertEqual(1, column2.call_order)


class IndicatorColumnTest(test.TestCase):

  def test_indicator_column(self):
    a = fc._categorical_column_with_hash_bucket('a', 4)
    indicator_a = fc._indicator_column(a)
    self.assertEqual(indicator_a.categorical_column.name, 'a')
    self.assertEqual(indicator_a.name, 'a_indicator')
    self.assertEqual(indicator_a._var_scope_name, 'a_indicator')
    self.assertEqual(indicator_a._variable_shape, [1, 4])
    b = fc._categorical_column_with_hash_bucket('b', hash_bucket_size=100)
    indicator_b = fc._indicator_column(b)
    self.assertEqual(indicator_b.categorical_column.name, 'b')
    self.assertEqual(indicator_b.name, 'b_indicator')
    self.assertEqual(indicator_b._var_scope_name, 'b_indicator')
    self.assertEqual(indicator_b._variable_shape, [1, 100])

  def test_1D_shape_succeeds(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({'animal': ['fox', 'fox']})
    output = builder.get(animal)
    with self.cached_session():
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
                          self.evaluate(output))

  def test_2D_shape_succeeds(self):
    # TODO(ispir/cassandrax): Switch to categorical_column_with_keys when
    # ready.
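    # Hash-bucket ids are a deterministic fingerprint of the input string
    # modulo the bucket count, so asserting the exact one-hot position of
    # 'fox' is stable across runs.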
    animal = fc._indicator_column(
        fc._categorical_column_with_hash_bucket('animal', 4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [1, 0]],
                values=['fox', 'fox'],
                dense_shape=[2, 1])
    })
    output = builder.get(animal)
    with self.cached_session():
      self.assertAllEqual([[0., 0., 1., 0.], [0., 0., 1., 0.]],
                          self.evaluate(output))

  def test_multi_hot(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 1], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.cached_session():
      self.assertAllEqual([[0., 2., 0., 0.]], self.evaluate(output))

  def test_multi_hot2(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_identity('animal', num_buckets=4))
    builder = _LazyBuilder({
        'animal':
            sparse_tensor.SparseTensor(
                indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
    })
    output = builder.get(animal)
    with self.cached_session():
      self.assertAllEqual([[0., 1., 1., 0.]], self.evaluate(output))

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    a = fc._categorical_column_with_hash_bucket('a', 4)
    column = fc._indicator_column(a)
    column_copy = copy.deepcopy(column)
    self.assertEqual(column_copy.categorical_column.name, 'a')
    self.assertEqual(column.name, 'a_indicator')
    self.assertEqual(column._variable_shape, [1, 4])

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc._indicator_column(a)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_indicator]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_transform(self):
    a = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_indicator = fc._indicator_column(a)
    features = {
        'aaa':
            sparse_tensor.SparseTensorValue(
                indices=((0, 0), (1, 0), (1, 1)),
                values=('marlo', 'skywalker', 'omar'),
                dense_shape=(2, 2))
    }
    indicator_tensor = _transform_features(features,
                                           [a_indicator])[a_indicator]
    with _initialized_session():
      self.assertAllEqual([[0, 0, 1], [1, 0, 0]],
                          self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_weighted_column(self):
    # Github issue 12557
    ids = fc._categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc._weighted_categorical_column(ids, 'weights')
    indicator = fc._indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'a', 'c']]),
        'weights': constant_op.constant([[2., 4., 6., 1.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[6., 4., 3.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_weighted_column(self):
    # Github issue 12583
    ids = fc._categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    weights = fc._weighted_categorical_column(ids, 'weights')
    indicator = fc._indicator_column(weights)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
        'weights': constant_op.constant([[2., 4., 6.]])
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[0., 4., 2.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_transform_with_missing_value_in_categorical_column(self):
    # Github issue 12583
    ids = fc._categorical_column_with_vocabulary_list(
        key='ids', vocabulary_list=('a', 'b', 'c'))
    indicator = fc._indicator_column(ids)
    features = {
        'ids': constant_op.constant([['c', 'b', 'unknown']]),
    }
    indicator_tensor = _transform_features(features, [indicator])[indicator]
    with _initialized_session():
      self.assertAllEqual([[0., 1., 1.]], self.evaluate(indicator_tensor))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = fc.linear_model(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      with _initialized_session():
        # All should be zero-initialized.
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(weight_var))
        self.assertAllClose([[0.]], self.evaluate(predictions))
        weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
        self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      predictions = get_keras_linear_model_predictions(features, [animal])
      weight_var = get_linear_model_column_var(animal)
      with _initialized_session():
        # All should be zero-initialized.
        self.assertAllClose([[0.], [0.], [0.], [0.]],
                            self.evaluate(weight_var))
        self.assertAllClose([[0.]], self.evaluate(predictions))
        weight_var.assign([[1.], [2.], [3.], [4.]]).eval()
        self.assertAllClose([[2. + 3.]], self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    animal = fc._indicator_column(
        fc._categorical_column_with_identity('animal', num_buckets=4))
    with ops.Graph().as_default():
      features = {
          'animal':
              sparse_tensor.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = fc.input_layer(features, [animal])
      with _initialized_session():
        self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))


class EmbeddingColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc._embedding_column(
        categorical_column, dimension=embedding_dimension)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('mean', embedding_column.combiner)
    self.assertIsNone(embedding_column.ckpt_to_load_from)
    self.assertIsNone(embedding_column.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column.max_norm)
    self.assertTrue(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
    self.assertEqual((embedding_dimension,),
                     embedding_column._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column.max_norm)
    self.assertFalse(embedding_column.trainable)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual('aaa_embedding', embedding_column._var_scope_name)
    self.assertEqual((embedding_dimension,),
                     embedding_column._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    original = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    for embedding_column in (original, copy.deepcopy(original)):
      self.assertEqual('aaa', embedding_column.categorical_column.name)
      self.assertEqual(3, embedding_column.categorical_column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column.categorical_column._parse_example_spec)
      self.assertEqual(embedding_dimension, embedding_column.dimension)
      self.assertEqual('my_combiner', embedding_column.combiner)
      self.assertEqual('my_ckpt', embedding_column.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor',
                       embedding_column.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column.max_norm)
      self.assertFalse(embedding_column.trainable)
      self.assertEqual('aaa_embedding', embedding_column.name)
      self.assertEqual((embedding_dimension,),
                       embedding_column._variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_invalid_initializer(self):
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
      fc._embedding_column(
          categorical_column, dimension=2, initializer='not_fn')

  @test_util.run_deprecated_v1
  def test_parse_example(self):
    a = fc._categorical_column_with_vocabulary_list(
        key='aaa', vocabulary_list=('omar', 'stringer', 'marlo'))
    a_embedded = fc._embedding_column(a, dimension=2)
    data = example_pb2.Example(features=feature_pb2.Features(
        feature={
            'aaa':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[b'omar', b'stringer']))
        }))
    features = parsing_ops.parse_example(
        serialized=[data.SerializeToString()],
        features=fc.make_parse_example_spec([a_embedded]))
    self.assertIn('aaa', features)
    with self.cached_session():
      _assert_sparse_tensor_value(
          self,
          sparse_tensor.SparseTensorValue(
              indices=[[0, 0], [0, 1]],
              values=np.array([b'omar', b'stringer'], dtype=np.object_),
              dense_shape=[1, 2]),
          features['aaa'].eval())

  @test_util.run_deprecated_v1
  def test_transform_feature(self):
    a = fc._categorical_column_with_identity(key='aaa', num_buckets=3)
    a_embedded = fc._embedding_column(a, dimension=2)
    features = {
        'aaa':
            sparse_tensor.SparseTensor(
                indices=((0, 0), (1, 0), (1, 1)),
                values=(0, 1, 0),
                dense_shape=(2, 2))
    }
    outputs = _transform_features(features, [a, a_embedded])
    output_a = outputs[a]
    output_embedded = outputs[a_embedded]
    with _initialized_session():
      _assert_sparse_tensor_value(self, self.evaluate(output_a),
                                  self.evaluate(output_embedded))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({'aaa': sparse_input}))

    # Assert expected embedding variable and lookups.
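    # A single 3x2 variable should back the lookup; checking the collection
    # by name also guards against accidentally creating duplicate embedding
    # variables.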
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_3d(self):
    # Inputs.
    vocabulary_size = 4
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0, 0), (1, 1, 0), (1, 1, 4), (3, 0, 0), (3, 1, 2)),
        values=(2, 0, 1, 1, 2),
        dense_shape=(4, 2, 5))

    # Embedding variable.
    embedding_dimension = 3
    embedding_values = (
        (1., 2., 4.),   # id 0
        (3., 5., 1.),   # id 1
        (7., 11., 2.),  # id 2
        (2., 7., 12.)   # id 3
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [[2], []], embedding = [[7, 11, 2], [0, 0, 0]]
        ((7., 11., 2.), (0., 0., 0.)),
        # example 1, ids [[], [0, 1]], embedding
        # = mean([[], [1, 2, 4] + [3, 5, 1]]) = [[0, 0, 0], [2, 3.5, 2.5]]
        ((0., 0., 0.), (2., 3.5, 2.5)),
        # example 2, ids [[], []], embedding = [[0, 0, 0], [0, 0, 0]]
        ((0., 0., 0.), (0., 0., 0.)),
        # example 3, ids [[1], [2]], embedding = [[3, 5, 1], [7, 11, 2]]
        ((3., 5., 1.), (7., 11., 2.)),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({'aaa': sparse_input}))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_weight_collections(self):
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_column = fc._embedding_column(categorical_column, dimension=2)

    # Provide sparse input and get dense result.
    embedding_column._get_dense_tensor(
        _LazyBuilder({'aaa': sparse_input}), weight_collections=('my_vars',))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    my_vars = ops.get_collection('my_vars')
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in my_vars]))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_placeholder_inputs(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
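    # Same 3x2 embedding table as in test_get_dense_tensor; this variant only
    # differs in that indices/values/shape are fed through placeholders at
    # session run time.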
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    input_indices = array_ops.placeholder(dtype=dtypes.int64)
    input_values = array_ops.placeholder(dtype=dtypes.int64)
    input_shape = array_ops.placeholder(dtype=dtypes.int64)
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({
            'aaa':
                sparse_tensor.SparseTensorValue(
                    indices=input_indices,
                    values=input_values,
                    dense_shape=input_shape)
        }))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(
          expected_lookups,
          embedding_lookup.eval(
              feed_dict={
                  input_indices: sparse_input.indices,
                  input_values: sparse_input.values,
                  input_shape: sparse_input.dense_shape,
              }))

  @test_util.run_deprecated_v1
  def test_get_dense_tensor_restore_from_ckpt(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable. The checkpoint file contains _embedding_values.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    ckpt_path = test.test_src_dir_path(
        'python/feature_column/testdata/embedding.ckpt')
    ckpt_tensor = 'my_embedding'

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        ckpt_to_load_from=ckpt_path,
        tensor_name_in_ckpt=ckpt_tensor)

    # Provide sparse input and get dense result.
    embedding_lookup = embedding_column._get_dense_tensor(
        _LazyBuilder({'aaa': sparse_input}))

    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

  @test_util.run_deprecated_v1
  def test_linear_model(self):
    # Inputs.
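    # For a linear model, an embedding column contributes
    # sum(embedding_lookup * linear_weights) + bias per example; the expected
    # values are worked out in the comments near the end of this test.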
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = fc.linear_model({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
      with _initialized_session():
        # Predictions with all zero weights.
        self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
        self.assertAllClose(zeros_embedding_values,
                            self.evaluate(embedding_weights))
        self.assertAllClose(
            np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
        self.assertAllClose(
            np.zeros((batch_size, 1)), self.evaluate(predictions))

        # Predictions with all non-zero weights.
        embedding_weights.assign((
            (1., 2.),  # id 0
            (3., 5.),  # id 1
            (7., 11.)  # id 2
        )).eval()
        linear_weights.assign(((4.,), (6.,))).eval()
        # example 0, ids [2], embedding[0] = [7, 11]
        # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
        # example 2, ids [], embedding[2] = [0, 0]
        # example 3, ids [1], embedding[3] = [3, 5]
        # sum(embeddings * linear_weights)
        # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
        self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                            self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_keras_linear_model(self):
    # Inputs.
    batch_size = 4
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(batch_size, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_shape = (vocabulary_size, embedding_dimension)
    zeros_embedding_values = np.zeros(embedding_shape)

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual(embedding_shape, shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return zeros_embedding_values

    # Build columns.
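    # Column setup is identical to test_linear_model above; only the
    # prediction head (get_keras_linear_model_predictions) differs.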
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    with ops.Graph().as_default():
      predictions = get_keras_linear_model_predictions({
          categorical_column.name: sparse_input
      }, (embedding_column,))
      expected_var_names = (
          'linear_model/bias_weights:0',
          'linear_model/aaa_embedding/weights:0',
          'linear_model/aaa_embedding/embedding_weights:0',
      )
      self.assertItemsEqual(
          expected_var_names,
          [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
      trainable_vars = {
          v.name: v
          for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      }
      self.assertItemsEqual(expected_var_names, trainable_vars.keys())
      bias = trainable_vars['linear_model/bias_weights:0']
      embedding_weights = trainable_vars[
          'linear_model/aaa_embedding/embedding_weights:0']
      linear_weights = trainable_vars['linear_model/aaa_embedding/weights:0']
      with _initialized_session():
        # Predictions with all zero weights.
        self.assertAllClose(np.zeros((1,)), self.evaluate(bias))
        self.assertAllClose(zeros_embedding_values,
                            self.evaluate(embedding_weights))
        self.assertAllClose(
            np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights))
        self.assertAllClose(
            np.zeros((batch_size, 1)), self.evaluate(predictions))

        # Predictions with all non-zero weights.
        embedding_weights.assign((
            (1., 2.),  # id 0
            (3., 5.),  # id 1
            (7., 11.)  # id 2
        )).eval()
        linear_weights.assign(((4.,), (6.,))).eval()
        # example 0, ids [2], embedding[0] = [7, 11]
        # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5]
        # example 2, ids [], embedding[2] = [0, 0]
        # example 3, ids [1], embedding[3] = [3, 5]
        # sum(embeddings * linear_weights)
        # = [4*7 + 6*11, 4*2 + 6*3.5, 4*0 + 6*0, 4*3 + 6*5] = [94, 29, 0, 42]
        self.assertAllClose(((94.,), (29.,), (0.,), (42.,)),
                            self.evaluate(predictions))

  @test_util.run_deprecated_v1
  def test_input_layer(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer)

    # Provide sparse input and get dense result.
    input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))

    # Assert expected embedding variable and lookups.
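    # input_layer registers the embedding weights under its own variable
    # scope and, since trainable defaults to True, in TRAINABLE_VARIABLES too.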
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in trainable_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, trainable_vars[0].eval())
      self.assertAllEqual(expected_lookups, self.evaluate(input_layer))

  @test_util.run_deprecated_v1
  def test_input_layer_not_trainable(self):
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))

    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )

    # Build columns.
    categorical_column = fc._categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = fc._embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=False)

    # Provide sparse input and get dense result.
    input_layer = fc.input_layer({'aaa': sparse_input}, (embedding_column,))

    # Assert expected embedding variable and lookups.
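    # With trainable=False the embedding variable must still appear in
    # GLOBAL_VARIABLES (so it gets initialized and saved) but not in
    # TRAINABLE_VARIABLES.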
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    self.assertItemsEqual(
        [], ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0].eval())
      self.assertAllEqual(expected_lookups, self.evaluate(input_layer))


class SharedEmbeddingColumnTest(test.TestCase):

  @test_util.run_deprecated_v1
  def test_defaults(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = fc_new.shared_embedding_columns(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
    self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column_a.dimension)
    self.assertEqual(embedding_dimension, embedding_column_b.dimension)
    self.assertEqual('mean', embedding_column_a.combiner)
    self.assertEqual('mean', embedding_column_b.combiner)
    self.assertIsNone(embedding_column_a.ckpt_to_load_from)
    self.assertIsNone(embedding_column_b.ckpt_to_load_from)
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_a.shared_embedding_collection_name)
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_b.shared_embedding_collection_name)
    self.assertIsNone(embedding_column_a.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column_b.tensor_name_in_ckpt)
    self.assertIsNone(embedding_column_a.max_norm)
    self.assertIsNone(embedding_column_b.max_norm)
    self.assertTrue(embedding_column_a.trainable)
    self.assertTrue(embedding_column_b.trainable)
    self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
    self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_a._var_scope_name)
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_b._var_scope_name)
    self.assertEqual((embedding_dimension,),
                     embedding_column_a._variable_shape)
    self.assertEqual((embedding_dimension,),
                     embedding_column_b._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column_a._parse_example_spec)
    self.assertEqual({
        'bbb': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column_b._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_all_constructor_args(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        shared_embedding_collection_name='shared_embedding_collection_name',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
    self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column_a.dimension)
    self.assertEqual(embedding_dimension, embedding_column_b.dimension)
    self.assertEqual('my_combiner', embedding_column_a.combiner)
    self.assertEqual('my_combiner', embedding_column_b.combiner)
    self.assertEqual('shared_embedding_collection_name',
                     embedding_column_a.shared_embedding_collection_name)
    self.assertEqual('shared_embedding_collection_name',
                     embedding_column_b.shared_embedding_collection_name)
    self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
    self.assertEqual('my_ckpt', embedding_column_b.ckpt_to_load_from)
    self.assertEqual('my_ckpt_tensor', embedding_column_a.tensor_name_in_ckpt)
    self.assertEqual('my_ckpt_tensor', embedding_column_b.tensor_name_in_ckpt)
    self.assertEqual(42., embedding_column_a.max_norm)
    self.assertEqual(42., embedding_column_b.max_norm)
    self.assertFalse(embedding_column_a.trainable)
    self.assertFalse(embedding_column_b.trainable)
    self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
    self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
    self.assertEqual('shared_embedding_collection_name',
                     embedding_column_a._var_scope_name)
    self.assertEqual('shared_embedding_collection_name',
                     embedding_column_b._var_scope_name)
    self.assertEqual((embedding_dimension,),
                     embedding_column_a._variable_shape)
    self.assertEqual((embedding_dimension,),
                     embedding_column_b._variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column_a._parse_example_spec)
    self.assertEqual({
        'bbb': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column_b._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_deep_copy(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    original_a, _ = fc_new.shared_embedding_columns(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        shared_embedding_collection_name='shared_embedding_collection_name',
        ckpt_to_load_from='my_ckpt',
        tensor_name_in_ckpt='my_ckpt_tensor',
        max_norm=42.,
        trainable=False)
    for embedding_column_a in (original_a, copy.deepcopy(original_a)):
      self.assertEqual('aaa', embedding_column_a.categorical_column.name)
      self.assertEqual(3, embedding_column_a.categorical_column._num_buckets)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column_a.categorical_column._parse_example_spec)
      self.assertEqual(embedding_dimension, embedding_column_a.dimension)
      self.assertEqual('my_combiner', embedding_column_a.combiner)
      self.assertEqual('shared_embedding_collection_name',
                       embedding_column_a.shared_embedding_collection_name)
      self.assertEqual('my_ckpt', embedding_column_a.ckpt_to_load_from)
      self.assertEqual('my_ckpt_tensor',
                       embedding_column_a.tensor_name_in_ckpt)
      self.assertEqual(42., embedding_column_a.max_norm)
      self.assertFalse(embedding_column_a.trainable)
      self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
      self.assertEqual((embedding_dimension,),
                       embedding_column_a._variable_shape)
      self.assertEqual({
          'aaa': parsing_ops.VarLenFeature(dtypes.int64)
      }, embedding_column_a._parse_example_spec)

  @test_util.run_deprecated_v1
  def test_invalid_initializer(self):
    categorical_column_a = fc._categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = fc._categorical_column_with_identity(
        key='bbb', num_buckets=3)
    with self.assertRaisesRegexp(ValueError, 'initializer must be callable'):
      fc_new.shared_embedding_columns(
          [categorical_column_a, categorical_column_b],
          dimension=2,
          initializer='not_fn')
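
  # All columns passed to shared_embedding_columns share one lookup table,
  # so they must use compatible id spaces; the next test verifies that mixing
  # column types (identity vs. hash bucket) is rejected.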
@test_util.run_deprecated_v1 def test_incompatible_column_type(self): categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=3) categorical_column_c = fc._categorical_column_with_hash_bucket( key='ccc', hash_bucket_size=3) with self.assertRaisesRegexp( ValueError, 'all categorical_columns must have the same type.*' '_IdentityCategoricalColumn.*_HashedCategoricalColumn'): fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b, categorical_column_c], dimension=2) @test_util.run_deprecated_v1 def test_weighted_categorical_column_ok(self): categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=3) weighted_categorical_column_a = fc._weighted_categorical_column( categorical_column_a, weight_feature_key='aaa_weights') categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=3) weighted_categorical_column_b = fc._weighted_categorical_column( categorical_column_b, weight_feature_key='bbb_weights') fc_new.shared_embedding_columns( [weighted_categorical_column_a, categorical_column_b], dimension=2) fc_new.shared_embedding_columns( [categorical_column_a, weighted_categorical_column_b], dimension=2) fc_new.shared_embedding_columns( [weighted_categorical_column_a, weighted_categorical_column_b], dimension=2) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc._categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) b = fc._categorical_column_with_vocabulary_list( key='bbb', vocabulary_list=('omar', 'stringer', 'marlo')) a_embedded, b_embedded = fc_new.shared_embedding_columns([a, b], dimension=2) data = example_pb2.Example(features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature(bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'bbb': feature_pb2.Feature(bytes_list=feature_pb2.BytesList( value=[b'stringer', b'marlo'])), })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec([a_embedded, b_embedded])) self.assertIn('aaa', features) self.assertIn('bbb', features) with self.cached_session(): _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), features['aaa'].eval()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'stringer', b'marlo'], dtype=np.object_), dense_shape=[1, 2]), features['bbb'].eval()) @test_util.run_deprecated_v1 def test_transform_feature(self): a = fc._categorical_column_with_identity(key='aaa', num_buckets=3) b = fc._categorical_column_with_identity(key='bbb', num_buckets=3) a_embedded, b_embedded = fc_new.shared_embedding_columns([a, b], dimension=2) features = { 'aaa': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)), 'bbb': sparse_tensor.SparseTensor( indices=((0, 0), (1, 0), (1, 1)), values=(1, 2, 1), dense_shape=(2, 2)), } outputs = _transform_features(features, [a, a_embedded, b, b_embedded]) output_a = outputs[a] output_a_embedded = outputs[a_embedded] output_b = outputs[b] output_b_embedded = outputs[b_embedded] with _initialized_session(): _assert_sparse_tensor_value(self, self.evaluate(output_a), self.evaluate(output_a_embedded)) _assert_sparse_tensor_value(self, self.evaluate(output_b), 
self.evaluate(output_b_embedded)) @test_util.run_deprecated_v1 def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array( [[2, -1, -1], # example 0, ids [2] [0, 1, -1]]) # example 1, ids [0, 1] input_b = np.array( [[0, -1, -1], # example 0, ids [0] [-1, -1, -1]]) # example 1, ids [] input_features = { 'aaa': input_a, 'bbb': input_b } # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups_a = ( # example 0: (7., 11.), # ids [2], embedding = [7, 11] # example 1: (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] ) expected_lookups_b = ( # example 0: (1., 2.), # ids [0], embedding = [1, 2] # example 1: (0., 0.), # ids [], embedding = [0, 0] ) # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a._get_dense_tensor( _LazyBuilder(input_features)) embedding_lookup_b = embedding_column_b._get_dense_tensor( _LazyBuilder(input_features)) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) embedding_var = global_vars[0] with _initialized_session(): self.assertAllEqual(embedding_values, self.evaluate(embedding_var)) self.assertAllEqual(expected_lookups_a, self.evaluate(embedding_lookup_a)) self.assertAllEqual(expected_lookups_b, self.evaluate(embedding_lookup_b)) @test_util.run_deprecated_v1 def test_get_dense_tensor_weight_collections(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) fc.input_layer( input_features, [embedding_column_a, embedding_column_b], weight_collections=('my_vars',)) # Assert expected embedding variable and lookups. 
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual( ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',), tuple(v.name for v in global_vars)) my_vars = ops.get_collection('my_vars') self.assertItemsEqual( ('input_layer/aaa_bbb_shared_embedding/embedding_weights:0',), tuple(v.name for v in my_vars)) @test_util.run_deprecated_v1 def test_get_dense_tensor_placeholder_inputs(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array( [[2, -1, -1], # example 0, ids [2] [0, 1, -1]]) # example 1, ids [0, 1] input_b = np.array( [[0, -1, -1], # example 0, ids [0] [-1, -1, -1]]) # example 1, ids [] # Specify shape, because dense input must have rank specified. input_a_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_b_placeholder = array_ops.placeholder( dtype=dtypes.int64, shape=[None, 3]) input_features = { 'aaa': input_a_placeholder, 'bbb': input_b_placeholder, } feed_dict = { input_a_placeholder: input_a, input_b_placeholder: input_b, } # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a._get_dense_tensor( _LazyBuilder(input_features)) embedding_lookup_b = embedding_column_b._get_dense_tensor( _LazyBuilder(input_features)) with _initialized_session() as sess: sess.run([embedding_lookup_a, embedding_lookup_b], feed_dict=feed_dict) @test_util.run_deprecated_v1 def test_linear_model(self): # Inputs. batch_size = 2 vocabulary_size = 3 # -1 values are ignored. input_a = np.array( [[2, -1, -1], # example 0, ids [2] [0, 1, -1]]) # example 1, ids [0, 1] input_b = np.array( [[0, -1, -1], # example 0, ids [0] [-1, -1, -1]]) # example 1, ids [] # Embedding variable. embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): predictions = fc.linear_model({ categorical_column_a.name: input_a, categorical_column_b.name: input_b, }, (embedding_column_a, embedding_column_b)) # Linear weights do not follow the column name. But this is a rare use # case, and fixing it would add too much complexity to the code. 
expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_bbb_shared_embedding/weights:0', 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0', 'linear_model/aaa_bbb_shared_embedding_1/weights:0', ) self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertItemsEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0'] linear_weights_a = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding/weights:0'] linear_weights_b = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding_1/weights:0'] with _initialized_session(): # Predictions with all zero weights. self.assertAllClose(np.zeros((1,)), self.evaluate(bias)) self.assertAllClose(zeros_embedding_values, self.evaluate(embedding_weights)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_a)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_b)) self.assertAllClose( np.zeros((batch_size, 1)), self.evaluate(predictions)) # Predictions with all non-zero weights. embedding_weights.assign(( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 )).eval() linear_weights_a.assign(((4.,), (6.,))).eval() # example 0, ids [2], embedding[0] = [7, 11] # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5] # sum(embeddings * linear_weights) # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29] linear_weights_b.assign(((3.,), (5.,))).eval() # example 0, ids [0], embedding[0] = [1, 2] # example 1, ids [], embedding[1] = [0, 0] # sum(embeddings * linear_weights) # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0] self.assertAllClose([[94. + 13.], [29.]], self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_keras_linear_model(self): # Inputs. batch_size = 2 vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] # Embedding variable. embedding_dimension = 2 embedding_shape = (vocabulary_size, embedding_dimension) zeros_embedding_values = np.zeros(embedding_shape) def _initializer(shape, dtype, partition_info): self.assertAllEqual(embedding_shape, shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return zeros_embedding_values # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) with ops.Graph().as_default(): predictions = get_keras_linear_model_predictions({ categorical_column_a.name: input_a, categorical_column_b.name: input_b, }, (embedding_column_a, embedding_column_b)) # Linear weights do not follow the column name. But this is a rare use # case, and fixing it would add too much complexity to the code. 
expected_var_names = ( 'linear_model/bias_weights:0', 'linear_model/aaa_bbb_shared_embedding/weights:0', 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0', 'linear_model/aaa_bbb_shared_embedding_1/weights:0', ) self.assertItemsEqual( expected_var_names, [v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)]) trainable_vars = { v.name: v for v in ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) } self.assertItemsEqual(expected_var_names, trainable_vars.keys()) bias = trainable_vars['linear_model/bias_weights:0'] embedding_weights = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding/embedding_weights:0'] linear_weights_a = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding/weights:0'] linear_weights_b = trainable_vars[ 'linear_model/aaa_bbb_shared_embedding_1/weights:0'] with _initialized_session(): # Predictions with all zero weights. self.assertAllClose(np.zeros((1,)), self.evaluate(bias)) self.assertAllClose(zeros_embedding_values, self.evaluate(embedding_weights)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_a)) self.assertAllClose( np.zeros((embedding_dimension, 1)), self.evaluate(linear_weights_b)) self.assertAllClose( np.zeros((batch_size, 1)), self.evaluate(predictions)) # Predictions with all non-zero weights. embedding_weights.assign(( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 )).eval() linear_weights_a.assign(((4.,), (6.,))).eval() # example 0, ids [2], embedding[0] = [7, 11] # example 1, ids [0, 1], embedding[1] = mean([1, 2] + [3, 5]) = [2, 3.5] # sum(embeddings * linear_weights) # = [4*7 + 6*11, 4*2 + 6*3.5] = [94, 29] linear_weights_b.assign(((3.,), (5.,))).eval() # example 0, ids [0], embedding[0] = [1, 2] # example 1, ids [], embedding[1] = [0, 0] # sum(embeddings * linear_weights) # = [3*1 + 5*2, 3*0 + 5*0] = [13, 0] self.assertAllClose([[94. + 13.], [29.]], self.evaluate(predictions)) def _test_input_layer(self, trainable=True): # Inputs. vocabulary_size = 3 sparse_input_a = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 4)), values=(2, 0, 1), dense_shape=(2, 5)) sparse_input_b = sparse_tensor.SparseTensorValue( # example 0, ids [0] # example 1, ids [] indices=((0, 0),), values=(0,), dense_shape=(2, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0: # A ids [2], embedding = [7, 11] # B ids [0], embedding = [1, 2] (7., 11., 1., 2.), # example 1: # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] # B ids [], embedding = [0, 0] (2., 3.5, 0., 0.), ) # Build columns. categorical_column_a = fc._categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc._categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = fc_new.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer, trainable=trainable) # Provide sparse input and get dense result. 
input_layer = fc.input_layer( features={'aaa': sparse_input_a, 'bbb': sparse_input_b}, feature_columns=(embedding_column_b, embedding_column_a)) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual( ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'], tuple([v.name for v in global_vars])) trainable_vars = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES) if trainable: self.assertItemsEqual( ['input_layer/aaa_bbb_shared_embedding/embedding_weights:0'], tuple([v.name for v in trainable_vars])) else: self.assertItemsEqual([], tuple([v.name for v in trainable_vars])) shared_embedding_vars = global_vars with _initialized_session(): self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval()) self.assertAllEqual(expected_lookups, self.evaluate(input_layer)) @test_util.run_deprecated_v1 def test_input_layer(self): self._test_input_layer() @test_util.run_deprecated_v1 def test_input_layer_no_trainable(self): self._test_input_layer(trainable=False) class WeightedCategoricalColumnTest(test.TestCase): @test_util.run_deprecated_v1 def test_defaults(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual('ids_weighted_by_values', column._var_scope_name) self.assertEqual(3, column._num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column._parse_example_spec) @test_util.run_deprecated_v1 def test_deep_copy(self): """Tests deepcopy of a weighted_categorical_column.""" original = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') for column in (original, copy.deepcopy(original)): self.assertEqual('ids_weighted_by_values', column.name) self.assertEqual(3, column._num_buckets) self.assertEqual({ 'ids': parsing_ops.VarLenFeature(dtypes.int64), 'values': parsing_ops.VarLenFeature(dtypes.float32) }, column._parse_example_spec) def test_invalid_dtype_none(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values', dtype=None) def test_invalid_dtype_string(self): with self.assertRaisesRegexp(ValueError, 'is not convertible to float'): fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values', dtype=dtypes.string) def test_invalid_input_dtype(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') strings = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp(ValueError, 'Bad dtype'): _transform_features({'ids': strings, 'values': strings}, (column,)) def test_column_name_collision(self): with self.assertRaisesRegexp(ValueError, r'Parse config.*already exists'): fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='aaa', num_buckets=3), weight_feature_key='aaa')._parse_example_spec() def test_missing_weights(self): column = fc._weighted_categorical_column( 
categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=('omar', 'stringer', 'marlo'), dense_shape=(2, 2)) with self.assertRaisesRegexp( ValueError, 'values is not in features dictionary'): _transform_features({'ids': inputs}, (column,)) @test_util.run_deprecated_v1 def test_parse_example(self): a = fc._categorical_column_with_vocabulary_list( key='aaa', vocabulary_list=('omar', 'stringer', 'marlo')) a_weighted = fc._weighted_categorical_column( a, weight_feature_key='weights') data = example_pb2.Example(features=feature_pb2.Features( feature={ 'aaa': feature_pb2.Feature(bytes_list=feature_pb2.BytesList( value=[b'omar', b'stringer'])), 'weights': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=[1., 10.])) })) features = parsing_ops.parse_example( serialized=[data.SerializeToString()], features=fc.make_parse_example_spec([a_weighted])) self.assertIn('aaa', features) self.assertIn('weights', features) with self.cached_session(): _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([b'omar', b'stringer'], dtype=np.object_), dense_shape=[1, 2]), features['aaa'].eval()) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=[[0, 0], [0, 1]], values=np.array([1., 10.], dtype=np.float32), dense_shape=[1, 2]), features['weights'].eval()) @test_util.run_deprecated_v1 def test_transform_features(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 1, 0), dense_shape=(2, 2)) weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = _transform_features({ 'ids': inputs, 'values': weights, }, (column,))[column] with _initialized_session(): _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_input(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') weights = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0.5, 1.0, 0.1), dense_shape=(2, 2)) id_tensor, weight_tensor = _transform_features({ 'ids': ((0, -1), (1, 0)), 'values': weights, }, (column,))[column] with _initialized_session(): _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((0, 1, 0), dtype=np.int64), dense_shape=(2, 2)), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=weights.indices, values=np.array(weights.values, dtype=np.float32), dense_shape=weights.dense_shape), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_transform_features_dense_weights(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( 
key='ids', num_buckets=3), weight_feature_key='values') inputs = sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(2, 1, 0), dense_shape=(2, 2)) id_tensor, weight_tensor = _transform_features({ 'ids': inputs, 'values': ((.5, 0.), (1., .1)), }, (column,))[column] with _initialized_session(): _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=inputs.indices, values=np.array(inputs.values, dtype=np.int64), dense_shape=inputs.dense_shape), self.evaluate(id_tensor)) _assert_sparse_tensor_value( self, sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=np.array((.5, 1., .1), dtype=np.float32), dense_shape=(2, 2)), self.evaluate(weight_tensor)) @test_util.run_deprecated_v1 def test_keras_linear_model(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = get_keras_linear_model_predictions({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) weight_var.assign(((1.,), (2.,), (3.,))).eval() # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_keras_linear_model_mismatched_shape(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp(ValueError, r'Dimensions.*are not compatible'): get_keras_linear_model_predictions({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), (1, 1)), values=(.5, 11., 1., .1), dense_shape=(2, 2)) }, (column,)) def test_keras_linear_model_mismatched_dense_values(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = get_keras_linear_model_predictions( { 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }, (column,), sparse_combiner='mean') # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. 
config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_keras_linear_model_mismatched_dense_shape(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = get_keras_linear_model_predictions({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) weight_var.assign(((1.,), (2.,), (3.,))).eval() # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) @test_util.run_deprecated_v1 def test_linear_model(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(.5, 1., .1), dense_shape=(2, 2)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) weight_var.assign(((1.,), (2.,), (3.,))).eval() # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) def test_linear_model_mismatched_shape(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): with self.assertRaisesRegexp( ValueError, r'Dimensions.*are not compatible'): fc.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': sparse_tensor.SparseTensorValue( indices=((0, 0), (0, 1), (1, 0), (1, 1)), values=(.5, 11., 1., .1), dense_shape=(2, 2)) }, (column,)) def test_linear_model_mismatched_dense_values(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc.linear_model( { 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,)) }, (column,), sparse_combiner='mean') # Disabling the constant folding optimizer here since it changes the # error message differently on CPU and GPU. 
config = config_pb2.ConfigProto() config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) with _initialized_session(config): with self.assertRaisesRegexp(errors.OpError, 'Incompatible shapes'): self.evaluate(predictions) def test_linear_model_mismatched_dense_shape(self): column = fc._weighted_categorical_column( categorical_column=fc._categorical_column_with_identity( key='ids', num_buckets=3), weight_feature_key='values') with ops.Graph().as_default(): predictions = fc.linear_model({ 'ids': sparse_tensor.SparseTensorValue( indices=((0, 0), (1, 0), (1, 1)), values=(0, 2, 1), dense_shape=(2, 2)), 'values': ((.5,), (1.,), (.1,)) }, (column,)) bias = get_linear_model_bias() weight_var = get_linear_model_column_var(column) with _initialized_session(): self.assertAllClose((0.,), self.evaluate(bias)) self.assertAllClose(((0.,), (0.,), (0.,)), self.evaluate(weight_var)) self.assertAllClose(((0.,), (0.,)), self.evaluate(predictions)) weight_var.assign(((1.,), (2.,), (3.,))).eval() # weight_var[0] * weights[0, 0] = 1 * .5 = .5 # weight_var[2] * weights[1, 0] + weight_var[1] * weights[1, 1] # = 3*1 + 2*.1 = 3+.2 = 3.2 self.assertAllClose(((.5,), (3.2,)), self.evaluate(predictions)) # TODO(ptucker): Add test with embedding of weighted categorical. if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/feature_column/feature_column_test.py
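# --- Illustrative sketch (not part of the test file above) ---
# A minimal, hedged usage example of the pattern SharedEmbeddingColumnTest
# exercises: two identity categorical columns sharing one embedding table,
# combined into a dense layer. This assumes the public TF 1.x API
# (tf.feature_column.shared_embedding_columns and input_layer); the tests
# above drive private aliases of the same functionality.
import numpy as np
import tensorflow as tf

categorical_a = tf.feature_column.categorical_column_with_identity(
    key='aaa', num_buckets=3)
categorical_b = tf.feature_column.categorical_column_with_identity(
    key='bbb', num_buckets=3)
# Both columns look up ids in a single shared (3, 2) embedding table.
shared_a, shared_b = tf.feature_column.shared_embedding_columns(
    [categorical_a, categorical_b], dimension=2)

features = {
    'aaa': tf.SparseTensor(indices=[[0, 0]],
                           values=np.array([2], dtype=np.int64),
                           dense_shape=[1, 1]),
    'bbb': tf.SparseTensor(indices=[[0, 0]],
                           values=np.array([0], dtype=np.int64),
                           dense_shape=[1, 1]),
}
# Dense output has shape (batch_size, 4): one 2-d embedding per column.
dense = tf.feature_column.input_layer(features, [shared_a, shared_b])
with tf.Session() as sess:
  sess.run((tf.global_variables_initializer(), tf.tables_initializer()))
  print(sess.run(dense))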
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """FeatureColumn serialization, deserialization logic.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from tensorflow.python.feature_column import feature_column_v2 as fc_lib from tensorflow.python.feature_column import sequence_feature_column as sfc_lib from tensorflow.python.ops import init_ops _FEATURE_COLUMNS = [ fc_lib.BucketizedColumn, fc_lib.EmbeddingColumn, fc_lib.HashedCategoricalColumn, fc_lib.IdentityCategoricalColumn, fc_lib.IndicatorColumn, fc_lib.NumericColumn, fc_lib.SequenceCategoricalColumn, fc_lib.SequenceDenseColumn, fc_lib.SharedEmbeddingColumn, fc_lib.VocabularyFileCategoricalColumn, fc_lib.VocabularyListCategoricalColumn, fc_lib.WeightedCategoricalColumn, init_ops.TruncatedNormal, sfc_lib.SequenceNumericColumn ] def serialize_feature_column(fc): """Serializes a FeatureColumn or a raw string key. This method should only be used to serialize parent FeatureColumns when implementing FeatureColumn._get_config(), else serialize_feature_columns() is preferable. This serialization also keeps information of the FeatureColumn class, so deserialization is possible without knowing the class type. For example: a = numeric_column('price') a._get_config() gives: { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } While serialize_feature_column(a) gives: { 'class_name': 'NumericColumn', 'config': { 'key': 'price', 'shape': (1,), 'default_value': None, 'dtype': 'float32', 'normalizer_fn': None } } Args: fc: A FeatureColumn or raw feature key string. Returns: Keras serialization for FeatureColumns, leaves string keys unaffected. Raises: ValueError if called with input that is not string or FeatureColumn. """ # Import here to avoid circular imports. from tensorflow.python.keras.utils import generic_utils # pylint: disable=g-import-not-at-top if isinstance(fc, six.string_types): return fc elif isinstance(fc, fc_lib.FeatureColumn): return generic_utils.serialize_keras_class_and_config( fc.__class__.__name__, fc._get_config()) # pylint: disable=protected-access else: raise ValueError('Instance: {} is not a FeatureColumn'.format(fc)) def deserialize_feature_column(config, custom_objects=None, columns_by_name=None): """Deserializes a `config` generated with `serialize_feature_column`. This method should only be used to deserialize parent FeatureColumns when implementing FeatureColumn._from_config(), else deserialize_feature_columns() is preferable. Returns a FeatureColumn for this config. TODO(b/118939620): Simplify code if Keras utils support object deduping. Args: config: A Dict with the serialization of feature columns acquired by `serialize_feature_column`, or a string representing a raw column. 
custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). columns_by_name: A Dict[String, FeatureColumn] of existing columns in order to avoid duplication. Raises: ValueError if `config` has invalid format (e.g: expected keys missing, or refers to unknown classes). Returns: A FeatureColumn corresponding to the input `config`. """ # Import here to avoid circular imports. from tensorflow.python.keras.utils import generic_utils # pylint: disable=g-import-not-at-top if isinstance(config, six.string_types): return config # A dict from class_name to class for all FeatureColumns in this module. # FeatureColumns not part of the module can be passed as custom_objects. module_feature_column_classes = { cls.__name__: cls for cls in _FEATURE_COLUMNS} if columns_by_name is None: columns_by_name = {} (cls, cls_config) = generic_utils.class_and_config_for_serialized_keras_object( config, module_objects=module_feature_column_classes, custom_objects=custom_objects, printable_module_name='feature_column_v2') if not issubclass(cls, fc_lib.FeatureColumn): raise ValueError( 'Expected FeatureColumn class, instead found: {}'.format(cls)) # Always deserialize the FeatureColumn, in order to get the name. new_instance = cls._from_config( # pylint: disable=protected-access cls_config, custom_objects=custom_objects, columns_by_name=columns_by_name) # If the name already exists, re-use the column from columns_by_name, # (new_instance remains unused). return columns_by_name.setdefault(new_instance.name, new_instance) def serialize_feature_columns(feature_columns): """Serializes a list of FeatureColumns. Returns a list of Keras-style config dicts that represent the input FeatureColumns and can be used with `deserialize_feature_columns` for reconstructing the original columns. Args: feature_columns: A list of FeatureColumns. Returns: Keras serialization for the list of FeatureColumns. Raises: ValueError if called with input that is not a list of FeatureColumns. """ return [serialize_feature_column(fc) for fc in feature_columns] def deserialize_feature_columns(configs, custom_objects=None): """Deserializes a list of FeatureColumns configs. Returns a list of FeatureColumns given a list of config dicts acquired by `serialize_feature_columns`. Args: configs: A list of Dicts with the serialization of feature columns acquired by `serialize_feature_columns`. custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). Returns: FeatureColumn objects corresponding to the input configs. Raises: ValueError if called with input that is not a list of FeatureColumns. """ columns_by_name = {} return [ deserialize_feature_column(c, custom_objects, columns_by_name) for c in configs ]
tensorflow-master
tensorflow/python/feature_column/serialization.py
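# --- Illustrative round-trip sketch (not part of serialization.py above) ---
# A hedged example of the two single-column helpers defined above: serialize
# a FeatureColumn to its Keras-style config and rebuild it. The expected
# config shape follows the module's own docstring.
from tensorflow.python.feature_column import feature_column_v2 as fc_lib
from tensorflow.python.feature_column import serialization

price = fc_lib.numeric_column('price')
config = serialization.serialize_feature_column(price)
# config == {'class_name': 'NumericColumn', 'config': {'key': 'price', ...}}
restored = serialization.deserialize_feature_column(config)
assert restored.name == price.name
# Note: when deserializing many configs, deserialize_feature_columns()
# shares one columns_by_name dict so duplicate columns are deduped by name.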
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """estimator_lib python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import estimator_lib # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True estimator_lib.__all__ = [ s for s in dir(estimator_lib) if not s.startswith('__') ] from tensorflow_estimator.python.estimator.estimator_lib import *
tensorflow-master
tensorflow/python/estimator/estimator_lib.py
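# --- Illustrative sketch of the shim pattern used below ---
# Each of the estimator modules that follow repeats one idiom: copy every
# attribute name (including single-underscore ones) into the implementation
# module's __all__, then wildcard-import it so the deprecated import path
# keeps re-exporting the real module's contents. A runnable analogue using
# the stdlib json module as a hypothetical stand-in for tensorflow_estimator:
import json as _impl

# Include attrs that start with single underscore, mirroring the shims.
_HAS_DYNAMIC_ATTRIBUTES = True
_impl.__all__ = [s for s in dir(_impl) if not s.startswith('__')]

from json import *  # noqa: F401,F403 - re-exports everything in json.__all__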
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """gc python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import gc # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True gc.__all__ = [s for s in dir(gc) if not s.startswith('__')] from tensorflow_estimator.python.estimator.gc import *
tensorflow-master
tensorflow/python/estimator/gc.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """keras python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import keras # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True keras.__all__ = [s for s in dir(keras) if not s.startswith('__')] from tensorflow_estimator.python.estimator.keras import *
tensorflow-master
tensorflow/python/estimator/keras.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """util python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import util # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True util.__all__ = [s for s in dir(util) if not s.startswith('__')] from tensorflow_estimator.python.estimator.util import *
tensorflow-master
tensorflow/python/estimator/util.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """exporter python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import exporter # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True exporter.__all__ = [s for s in dir(exporter) if not s.startswith('__')] from tensorflow_estimator.python.estimator.exporter import *
tensorflow-master
tensorflow/python/estimator/exporter.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """estimator python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python import estimator # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True estimator.__all__ = [s for s in dir(estimator) if not s.startswith('__')] from tensorflow_estimator.python.estimator import *
tensorflow-master
tensorflow/python/estimator/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """model_fn python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import model_fn # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True model_fn.__all__ = [s for s in dir(model_fn) if not s.startswith('__')] from tensorflow_estimator.python.estimator.model_fn import *
tensorflow-master
tensorflow/python/estimator/model_fn.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """run_config python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import run_config # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True run_config.__all__ = [s for s in dir(run_config) if not s.startswith('__')] from tensorflow_estimator.python.estimator.run_config import *
tensorflow-master
tensorflow/python/estimator/run_config.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """estimator python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import estimator # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True estimator.__all__ = [s for s in dir(estimator) if not s.startswith('__')] from tensorflow_estimator.python.estimator.estimator import *
tensorflow-master
tensorflow/python/estimator/estimator.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """training python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator import training # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True training.__all__ = [s for s in dir(training) if not s.startswith('__')] from tensorflow_estimator.python.estimator.training import *
tensorflow-master
tensorflow/python/estimator/training.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """boosted_trees_utils python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator.canned import boosted_trees_utils # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True boosted_trees_utils.__all__ = [ s for s in dir(boosted_trees_utils) if not s.startswith('__') ] from tensorflow_estimator.python.estimator.canned.boosted_trees_utils import *
tensorflow-master
tensorflow/python/estimator/canned/boosted_trees_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """dnn python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator.canned import dnn # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True dnn.__all__ = [s for s in dir(dnn) if not s.startswith('__')] from tensorflow_estimator.python.estimator.canned.dnn import *
tensorflow-master
tensorflow/python/estimator/canned/dnn.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """metric_keys python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator.canned import metric_keys # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True metric_keys.__all__ = [s for s in dir(metric_keys) if not s.startswith('__')] from tensorflow_estimator.python.estimator.canned.metric_keys import *
tensorflow-master
tensorflow/python/estimator/canned/metric_keys.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """baseline python module. Importing from tensorflow.python.estimator is unsupported and will soon break! """ # pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow_estimator.python.estimator.canned import baseline # Include attrs that start with single underscore. _HAS_DYNAMIC_ATTRIBUTES = True baseline.__all__ = [s for s in dir(baseline) if not s.startswith('__')] from tensorflow_estimator.python.estimator.canned.baseline import *
tensorflow-master
tensorflow/python/estimator/canned/baseline.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn_testing_utils python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import dnn_testing_utils

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn_testing_utils.__all__ = [
    s for s in dir(dnn_testing_utils) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.dnn_testing_utils import *
tensorflow-master
tensorflow/python/estimator/canned/dnn_testing_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""linear python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import linear

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
linear.__all__ = [s for s in dir(linear) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.canned.linear import *
tensorflow-master
tensorflow/python/estimator/canned/linear.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""canned python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator import canned

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
canned.__all__ = [s for s in dir(canned) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.canned import *
tensorflow-master
tensorflow/python/estimator/canned/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""dnn_linear_combined python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import dnn_linear_combined

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
dnn_linear_combined.__all__ = [
    s for s in dir(dnn_linear_combined) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.dnn_linear_combined import *
tensorflow-master
tensorflow/python/estimator/canned/dnn_linear_combined.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""optimizers python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import optimizers

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
optimizers.__all__ = [s for s in dir(optimizers) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.canned.optimizers import *
tensorflow-master
tensorflow/python/estimator/canned/optimizers.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""boosted_trees python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import boosted_trees

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
boosted_trees.__all__ = [
    s for s in dir(boosted_trees) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.boosted_trees import *
tensorflow-master
tensorflow/python/estimator/canned/boosted_trees.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""head python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import head

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
head.__all__ = [s for s in dir(head) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.canned.head import *
tensorflow-master
tensorflow/python/estimator/canned/head.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""parsing_utils python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import parsing_utils

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
parsing_utils.__all__ = [
    s for s in dir(parsing_utils) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.parsing_utils import *
tensorflow-master
tensorflow/python/estimator/canned/parsing_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""prediction_keys python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import prediction_keys

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
prediction_keys.__all__ = [
    s for s in dir(prediction_keys) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.prediction_keys import *
tensorflow-master
tensorflow/python/estimator/canned/prediction_keys.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""linear_testing_utils python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.canned import linear_testing_utils

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
linear_testing_utils.__all__ = [
    s for s in dir(linear_testing_utils) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.canned.linear_testing_utils import *
tensorflow-master
tensorflow/python/estimator/canned/linear_testing_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export_lib python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.export import export_lib

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export_lib.__all__ = [s for s in dir(export_lib) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.export.export_lib import *
tensorflow-master
tensorflow/python/estimator/export/export_lib.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator import export

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export.__all__ = [s for s in dir(export) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.export import *
tensorflow-master
tensorflow/python/estimator/export/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.export import export

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export.__all__ = [s for s in dir(export) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.export.export import *
tensorflow-master
tensorflow/python/estimator/export/export.py
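The `export` shim above forwards the serving-input utilities that user code reaches through `tf.estimator.export`. A hedged usage sketch under TF 1.x semantics (the feature name 'x' is made up for illustration):

import tensorflow.compat.v1 as tf

# Parse serialized tf.Example protos at serving time.
feature_spec = {'x': tf.FixedLenFeature(shape=[1], dtype=tf.float32)}
serving_input_fn = (
    tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))
# An estimator's export_saved_model / export_savedmodel would then take
# serving_input_fn to define the SavedModel's serving signature.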
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""export_output python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.export import export_output

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
export_output.__all__ = [
    s for s in dir(export_output) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.export.export_output import *
tensorflow-master
tensorflow/python/estimator/export/export_output.py
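`export_output` supplies the classes a model_fn returns under `export_outputs` to describe SavedModel signatures. A minimal sketch, assuming TF 1.x and an illustrative `scores` tensor:

import tensorflow.compat.v1 as tf

# Inside a model_fn; `scores` stands in for the model's prediction tensor.
scores = tf.constant([[0.1, 0.9]])
export_outputs = {
    tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
        tf.estimator.export.PredictOutput({'scores': scores}),
}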
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""numpy_io python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs import numpy_io

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
numpy_io.__all__ = [s for s in dir(numpy_io) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs.numpy_io import *
tensorflow-master
tensorflow/python/estimator/inputs/numpy_io.py
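`numpy_io` forwards `numpy_input_fn`, which wraps in-memory numpy arrays in a queue-backed input pipeline. A small usage sketch (the array contents are made up):

import numpy as np
import tensorflow.compat.v1 as tf

x = {'feature': np.arange(8, dtype=np.float32)}
y = np.arange(8, dtype=np.int32)
input_fn = tf.estimator.inputs.numpy_input_fn(
    x=x, y=y, batch_size=4, num_epochs=1, shuffle=False)
# Calling input_fn() builds the queue and returns (features, labels) tensors.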
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pandas_io python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs import pandas_io

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
pandas_io.__all__ = [s for s in dir(pandas_io) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs.pandas_io import *
tensorflow-master
tensorflow/python/estimator/inputs/pandas_io.py
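`pandas_io.pandas_input_fn` is the DataFrame counterpart of `numpy_input_fn`, with the same batching arguments. A brief sketch with illustrative data:

import pandas as pd
import tensorflow.compat.v1 as tf

df = pd.DataFrame({'feature': [0.0, 1.0, 2.0, 3.0]})
labels = pd.Series([0, 1, 1, 0])
input_fn = tf.estimator.inputs.pandas_input_fn(
    x=df, y=labels, batch_size=2, num_epochs=1, shuffle=False)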
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""inputs python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator import inputs

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
inputs.__all__ = [s for s in dir(inputs) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs import *
tensorflow-master
tensorflow/python/estimator/inputs/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""inputs python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs import inputs

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
inputs.__all__ = [s for s in dir(inputs) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs.inputs import *
tensorflow-master
tensorflow/python/estimator/inputs/inputs.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""feeding_functions python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs.queues import feeding_functions

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
feeding_functions.__all__ = [
    s for s in dir(feeding_functions) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.inputs.queues.feeding_functions import *
tensorflow-master
tensorflow/python/estimator/inputs/queues/feeding_functions.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""queues python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs import queues

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
queues.__all__ = [s for s in dir(queues) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs.queues import *
tensorflow-master
tensorflow/python/estimator/inputs/queues/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""feeding_queue_runner python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""

# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow_estimator.python.estimator.inputs.queues import feeding_queue_runner

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
feeding_queue_runner.__all__ = [
    s for s in dir(feeding_queue_runner) if not s.startswith('__')
]

from tensorflow_estimator.python.estimator.inputs.queues.feeding_queue_runner import *
tensorflow-master
tensorflow/python/estimator/inputs/queues/feeding_queue_runner.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""All user ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.ops import gen_user_ops as _gen_user_ops

# go/tf-wildcard-import
from tensorflow.python.ops.gen_user_ops import *  # pylint: disable=wildcard-import

from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=['user_ops.my_fact'])
def my_fact():
  """Example of overriding the generated code for an Op."""
  return _gen_user_ops.fact()
tensorflow-master
tensorflow/python/user_ops/user_ops.py
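`my_fact` simply wraps the generated `fact()` op, which produces a string tensor. A quick check, assuming TF 1.x session semantics:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
fact = tf.user_ops.my_fact()  # string tensor produced by the Fact op
with tf.Session() as sess:
  print(sess.run(fact))  # prints one of the op's built-in factoid strings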
tensorflow-master
tensorflow/python/user_ops/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper of clusters."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import cluster
from tensorflow.python.grappler import item
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test


class ClusterTest(test.TestCase):

  def testBasic(self):
    with ops.Graph().as_default() as g:
      a = random_ops.random_uniform(shape=())
      b = random_ops.random_uniform(shape=())
      c = a + b
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(c)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)
      grappler_cluster = cluster.Cluster(
          disable_detailed_stats=False, disable_timeline=False)
      op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
          grappler_item)
      self.assertTrue(run_time > 0)
      self.assertEqual(len(op_perfs), 4)
      self.assertTrue(step_stats.dev_stats)

  def testNoDetailedStats(self):
    with ops.Graph().as_default() as g:
      a = random_ops.random_uniform(shape=())
      b = random_ops.random_uniform(shape=())
      c = a + b
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(c)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)
      grappler_cluster = cluster.Cluster(disable_detailed_stats=True)
      op_perfs, run_time, step_stats = grappler_cluster.MeasureCosts(
          grappler_item)
      self.assertTrue(run_time > 0)
      self.assertEqual(len(op_perfs), 0)
      self.assertEqual(len(step_stats.dev_stats), 0)

  def testMemoryEstimates(self):
    with ops.Graph().as_default() as g:
      with ops.device('/job:localhost/replica:0/task:0/device:CPU:0'):
        a = random_ops.random_uniform(shape=())
        b = random_ops.random_uniform(shape=())
        c = a + b
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(c)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        grappler_cluster = cluster.Cluster(
            disable_detailed_stats=True, disable_timeline=True)
        peak_mem = grappler_cluster.DeterminePeakMemoryUsage(grappler_item)
        self.assertLessEqual(1, len(peak_mem))
        snapshot = peak_mem['/job:localhost/replica:0/task:0/device:CPU:0']
        peak_usage = snapshot[0]
        self.assertEqual(52, peak_usage)
        live_tensors = snapshot[1]
        self.assertEqual(15, len(live_tensors))

  def testVirtualCluster(self):
    with ops.Graph().as_default() as g:
      with ops.device('/device:GPU:0'):
        a = random_ops.random_uniform(shape=[1024, 1024])
        b = random_ops.random_uniform(shape=[1024, 1024])
        c = a + b
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(c)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)
      device_properties = device_properties_pb2.DeviceProperties(
          type='GPU',
          frequency=1000,
          num_cores=60,
          environment={'architecture': '7'})
      named_device = device_properties_pb2.NamedDevice(
          properties=device_properties, name='/device:GPU:0')
      grappler_cluster = cluster.Cluster(
          disable_detailed_stats=False,
          disable_timeline=False,
          devices=[named_device])
      op_perfs, run_time, _ = grappler_cluster.MeasureCosts(grappler_item)
      self.assertEqual(run_time, 0.000545)
      self.assertEqual(len(op_perfs), 15)

      estimated_perf = grappler_cluster.EstimatePerformance(named_device)
      self.assertEqual(7680.0, estimated_perf)

  def testContext(self):
    with ops.Graph().as_default() as g:
      a = random_ops.random_uniform(shape=())
      b = random_ops.random_uniform(shape=())
      c = a + b
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(c)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)
      with cluster.Provision(
          disable_detailed_stats=False, disable_timeline=False) as gcluster:
        op_perfs, run_time, step_stats = gcluster.MeasureCosts(grappler_item)
        self.assertTrue(run_time > 0)
        self.assertEqual(len(op_perfs), 4)
        self.assertTrue(step_stats.dev_stats)

  def testAvailableOps(self):
    with cluster.Provision() as gcluster:
      op_names = gcluster.ListAvailableOps()
      self.assertTrue('Add' in op_names)
      self.assertTrue('MatMul' in op_names)
      self.assertEqual(op_names, sorted(op_names))

  def testSupportDevices(self):
    with ops.Graph().as_default() as g:
      a = random_ops.random_uniform(shape=(2, 3))
      b = random_ops.random_uniform(shape=(2, 3))
      c = a + b
      dims = math_ops.range(0, array_ops.rank(c), 1)
      d = math_ops.reduce_sum(a, axis=dims)
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(d)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)

      device_properties = device_properties_pb2.DeviceProperties(
          type='GPU', frequency=1000, num_cores=60)
      named_gpu = device_properties_pb2.NamedDevice(
          properties=device_properties, name='/GPU:0')
      device_properties = device_properties_pb2.DeviceProperties(
          type='CPU', frequency=3000, num_cores=6)
      named_cpu = device_properties_pb2.NamedDevice(
          properties=device_properties, name='/CPU:0')
      virtual_cluster = cluster.Cluster(devices=[named_cpu, named_gpu])
      supported_dev = virtual_cluster.GetSupportedDevices(grappler_item)
      self.assertEqual(supported_dev['add'], ['/CPU:0', '/GPU:0'])
      self.assertEqual(supported_dev['Sum'], ['/CPU:0', '/GPU:0'])
      self.assertEqual(supported_dev['range'], ['/CPU:0', '/GPU:0'])

      real_cluster = cluster.Cluster()
      supported_dev = real_cluster.GetSupportedDevices(grappler_item)
      if test.is_gpu_available():
        self.assertEqual(supported_dev['add'], [
            '/job:localhost/replica:0/task:0/device:CPU:0',
            '/job:localhost/replica:0/task:0/device:GPU:0'
        ])
        self.assertEqual(supported_dev['Sum'], [
            '/job:localhost/replica:0/task:0/device:CPU:0',
            '/job:localhost/replica:0/task:0/device:GPU:0'
        ])
        # The axis tensor must reside on the host
        self.assertEqual(supported_dev['range'],
                         ['/job:localhost/replica:0/task:0/device:CPU:0'])
      else:
        self.assertEqual(supported_dev['add'],
                         ['/job:localhost/replica:0/task:0/device:CPU:0'])


if __name__ == '__main__':
  test.main()
tensorflow-master
tensorflow/python/grappler/cluster_test.py
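The virtual-cluster pattern in `testVirtualCluster` is the core of this API: describe devices with `DeviceProperties` protos and Grappler estimates costs without touching real hardware. Condensed from the test above (the device numbers are the test's illustrative values):

from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.python.grappler import cluster

# Describe a virtual GPU for cost estimation.
gpu = device_properties_pb2.NamedDevice(
    name='/device:GPU:0',
    properties=device_properties_pb2.DeviceProperties(
        type='GPU', frequency=1000, num_cores=60))
virtual_cluster = cluster.Cluster(devices=[gpu])
# virtual_cluster.MeasureCosts(grappler_item) then returns
# (op_perfs, run_time, step_stats) estimated against this description.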
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the swig wrapper tf_optimizer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.grappler import item as gitem
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class PyWrapOptimizeGraphTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testBasic(self):
    """Make sure arguments can be passed correctly."""
    a = constant_op.constant(10, name='a')
    b = constant_op.constant(20, name='b')
    c = math_ops.add_n([a, b], name='c')
    d = math_ops.add_n([b, c], name='d')
    train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
    # Being a train_op will make 'd' to be added as a fetch node.
    train_op.append(d)
    mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())

    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.optimizers.append('constfold')
    rewriter_config.min_graph_nodes = -1

    graph = tf_optimizer.OptimizeGraph(config, mg)

    self.assertEqual(len(graph.node), 1)
    self.assertItemsEqual([node.name for node in graph.node], ['d'])

  @test_util.run_v1_only('b/120545219')
  def testKeepNodes(self):
    g = ops.Graph()
    with g.as_default():
      a1 = variables.VariableV1(
          1.0)  # Must be preserved since it's in the collection 'variables'.
      a2 = constant_op.constant(0, shape=[50, 50], name='keep')
      ops.add_to_collection('a2', a2)  # Explicitly add to collection.
      with g._attr_scope(
          {'_grappler_do_not_remove': attr_value_pb2.AttrValue(b=True)}):
        a3 = constant_op.constant(0, name='keep2')
      b = constant_op.constant(1, shape=[100, 10])
      c = constant_op.constant(0, shape=[10, 30])
      d = math_ops.matmul(b, c)
      ops.add_to_collection('train_op', d)  # d is the fetch node.

    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)

    # Check that the nodes referenced in various collections have been
    # preserved
    optimized_graph_nodes = [node.name for node in optimized_graph.node]
    expected_nodes = [
        d.op.name, a1.op.name, a2.op.name, a3.op.name,
        'Variable/initial_value', 'Variable/Assign'
    ]
    self.assertEqual(len(optimized_graph_nodes), len(expected_nodes))
    self.assertAllInSet(optimized_graph_nodes, expected_nodes)

  @test_util.run_v1_only('b/120545219')
  def testLoops(self):
    g = ops.Graph()
    with g.as_default():

      def _Cond(_, counter):
        return counter < end

      def _Body(buf, counter):
        buf = array_ops.concat([buf, [counter]], 0)
        counter += 1
        return [buf, counter]

      start = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      end = array_ops.placeholder(shape=[], dtype=dtypes.int32)
      init_buf = array_ops.zeros(shape=[0], dtype=dtypes.int32)
      loop_vars = [init_buf, start]
      shape_inv = [
          tensor_shape.TensorShape([None]),
          tensor_shape.TensorShape([])
      ]
      buf, _ = control_flow_ops.while_loop(_Cond, _Body, loop_vars, shape_inv)

      f = -array_ops.ones_like(buf, optimize=False)
      buf_shape = array_ops.shape(buf)
      f_shape = array_ops.shape(f)
      ops.add_to_collection('train_op', buf_shape)
      ops.add_to_collection('train_op', f_shape)

    # Optimize the graph.
    mg = meta_graph.create_meta_graph_def(graph=g)
    config = config_pb2.ConfigProto()
    rewriter_config = config.graph_options.rewrite_options
    rewriter_config.min_graph_nodes = -1
    optimized_graph = tf_optimizer.OptimizeGraph(config, mg)
    mg.graph_def.CopyFrom(optimized_graph)

    # Check that the nodes referenced in various collections have been
    # preserved
    item = gitem.Item(mg)
    props = item.GetOpProperties()
    buf_prop = props[buf.op.name]
    f_prop = props[f.op.name]
    self.assertEqual(buf_prop, f_prop)


if __name__ == '__main__':
  test.main()
tensorflow-master
tensorflow/python/grappler/tf_optimizer_test.py
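Each test drives the same three-step recipe: build a `MetaGraphDef`, configure the rewriter through a `ConfigProto`, and call `tf_optimizer.OptimizeGraph`. Distilled from the tests above:

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer

mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
config = config_pb2.ConfigProto()
rewriter_config = config.graph_options.rewrite_options
rewriter_config.optimizers.append('constfold')  # run constant folding
rewriter_config.min_graph_nodes = -1  # optimize even very small graphs
optimized_graph_def = tf_optimizer.OptimizeGraph(config, mg)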
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller Class."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import defaultdict


class Controller(object):
  """Controller class."""

  def __init__(self, item, cluster):
    """Controller class initializer.

    Args:
      item: The metagraph to place wrapped in a cluster.
      cluster: A cluster of devices on which to place the item.
    """
    self.item = item

    self._node = {}
    for node in item.metagraph.graph_def.node:
      self._node[node.name] = node

    self._fanout = defaultdict(lambda: [])
    for node in item.metagraph.graph_def.node:
      for fanin in self._get_node_fanin(node):
        self._fanout[fanin.name].append(node)

    important_op_names = item.IdentifyImportantOps(sort_topologically=True)

    # List of important ops (these are the ops to place) sorted in topological
    # order. The order of this collection is deterministic.
    self.important_ops = []
    for name in important_op_names:
      self.important_ops.append(self._node[name])

    self.node_properties = item.GetOpProperties()

    self.cluster = cluster
    self.devices = cluster.ListDevices()

    self.colocation_constraints = item.GetColocationGroups()

    self.placement_constraints = cluster.GetSupportedDevices(item)
    for node_name, dev in self.placement_constraints.items():
      if len(dev) == 1:
        # Place the node on the supported device
        node = self._node[node_name]
        node.device = dev[0]
        fanout = self.get_node_fanout(node)
        # Update the fanout of the fanin to bypass the node
        for fanin in self._get_node_fanin(node):
          fanout_of_fanin = self.get_node_fanout(fanin)
          fanout_of_fanin += fanout
          fanout_of_fanin.remove(node)
        # Remove node from the list of important ops since we don't need to
        # place the node.
        if node in self.important_ops:
          self.important_ops.remove(node)
          important_op_names.remove(node.name)

    # List of important op names, in non deterministic order.
    self.important_op_names = frozenset(important_op_names)

  @property
  def input_graph_def(self):
    return self.item.metagraph.graph_def

  @property
  def num_devices(self):
    return len(self.devices)

  def get_node_by_name(self, node_name):
    return self._node[node_name]

  def get_node_fanout(self, node):
    return self._fanout[node.name]

  def get_placements(self, *args, **kwargs):
    """Returns: Two TF ops.

    Args:
      *args: "".
      **kwargs: "".

    Returns:
      y_preds: tensor of size [batch_size, num_ops]
      log_probs: python dict of at least two fields: "sample", "target" each
        containing a tensor of size [batch_size], corresponding to the
        log_probs.
    """
    raise NotImplementedError

  def eval_placement(self, sess, *args, **kwargs):
    """At this time, this method evaluates ONLY ONE placement.

    Args:
      sess: a tf.compat.v1.Session() object used to retrieve cached assignment
        info.
      *args: "".
      **kwargs: "".

    Returns:
      run_time: scalar
    """
    raise NotImplementedError

  def export_placement(self, metagraph):
    """Annotate the placement onto the specified metagraph.

    Args:
      metagraph: the metagraph to annotate with the placement.
    """
    for node in metagraph.graph_def.node:
      if node.name in self.important_op_names:
        node.device = self.get_node_by_name(node.name).device

  # Get the nodes in the immediate fanin of node.
  # Beware: this doesn't take into account the nodes that may be skipped
  # since placement constraints force their placement.
  def _get_node_fanin(self, node):
    input_ops = []
    for fanin_name in node.input:
      if fanin_name[0] == "^":
        fanin_name = fanin_name[1:]
      fanin_name = fanin_name.split(":")[0]
      input_ops.append(self.get_node_by_name(fanin_name))
    return input_ops
tensorflow-master
tensorflow/python/grappler/controller.py
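`_get_node_fanin` relies on the GraphDef input-string convention: a leading `^` marks a control-dependency edge and a `:n` suffix selects an output port, so both are stripped to recover the producing node's name. A standalone sketch of that parsing (a hypothetical helper mirroring the method above, not part of the file):

def producer_name(input_name):
  """Strips control-dependency and output-port markers from a GraphDef input."""
  if input_name.startswith('^'):  # control dependency edge
    input_name = input_name[1:]
  return input_name.split(':')[0]  # drop the output index, if any

assert producer_name('^init') == 'init'
assert producer_name('matmul:1') == 'matmul'
assert producer_name('add') == 'add'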
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """HierarchicalController Class. The HierarchicalController encompasses the entire lifecycle of training the device placement policy, including generating op embeddings, getting groups for each op, placing those groups and running the predicted placements. Different assignment models can inherit from this class. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import numpy as np import six from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops as tf_ops from tensorflow.python.grappler.controller import Controller from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import linalg_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variable_scope from tensorflow.python.summary import summary from tensorflow.python.training import adam from tensorflow.python.training import gradient_descent from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import training_util class PlacerParams(object): """Class to hold a set of placement parameters as name-value pairs. A typical usage is as follows: ```python # Create a PlacerParams object specifying names and values of the model # parameters: params = PlacerParams(hidden_size=128, decay_steps=50) # The parameters are available as attributes of the PlacerParams object: hparams.hidden_size ==> 128 hparams.decay_steps ==> 50 ``` """ def __init__(self, **kwargs): """Create an instance of `PlacerParams` from keyword arguments. The keyword arguments specify name-values pairs for the parameters. The parameter types are inferred from the type of the values passed. The parameter names are added as attributes of `PlacerParams` object, and they can be accessed directly with the dot notation `params._name_`. Example: ```python # Define 1 parameter: 'hidden_size' params = PlacerParams(hidden_size=128) params.hidden_size ==> 128 ``` Args: **kwargs: Key-value pairs where the key is the parameter name and the value is the value for the parameter. """ for name, value in six.iteritems(kwargs): self.add_param(name, value) def add_param(self, name, value): """Adds {name, value} pair to hyperparameters. Args: name: Name of the hyperparameter. value: Value of the hyperparameter. 
Can be one of the following types: int, float, string, int list, float list, or string list. Raises: ValueError: if one of the arguments is invalid. """ # Keys in kwargs are unique, but 'name' could be the name of a pre-existing # attribute of this object. In that case we refuse to use it as a # parameter name. if getattr(self, name, None) is not None: raise ValueError("Parameter name is reserved: %s" % name) setattr(self, name, value) def hierarchical_controller_hparams(): """Hyperparameters for hierarchical planner.""" return PlacerParams( hidden_size=512, forget_bias_init=1.0, temperature=1.0, logits_std_noise=0.5, stop_noise_step=750, decay_steps=50, max_num_outputs=5, max_output_size=5, tanh_constant=1.0, adj_embed_dim=20, grouping_hidden_size=64, num_groups=None, bi_lstm=True, failing_signal=100, stop_sampling=500, start_with_failing_signal=True, always_update_baseline=False, bl_dec=0.9, grad_bound=1.0, lr=0.1, lr_dec=0.95, start_decay_step=400, optimizer_type="adam", stop_updating_after_steps=1000, name="hierarchical_controller", keep_prob=1.0, reward_function="sqrt", seed=1234, # distributed training params num_children=1) class HierarchicalController(Controller): """HierarchicalController class.""" def __init__(self, hparams, item, cluster, controller_id=0): """HierarchicalController class initializer. Args: hparams: All hyper-parameters. item: The metagraph to place. cluster: The cluster of hardware devices to optimize for. controller_id: the id of the controller in a multi-controller setup. """ super(HierarchicalController, self).__init__(item, cluster) self.ctrl_id = controller_id self.hparams = hparams if self.hparams.num_groups is None: self.num_groups = min(256, 20 * self.num_devices) else: self.num_groups = self.hparams.num_groups # creates self.op_embeddings and self.type_dict self.create_op_embeddings(verbose=False) # TODO(azalia) clean up embedding/group_embedding_size names self.group_emb_size = ( 2 * self.num_groups + len(self.type_dict) + self.hparams.max_num_outputs * self.hparams.max_output_size) self.embedding_size = self.group_emb_size self.initializer = init_ops.glorot_uniform_initializer( seed=self.hparams.seed) with variable_scope.variable_scope( self.hparams.name, initializer=self.initializer, reuse=variable_scope.AUTO_REUSE): # define parameters of feedforward variable_scope.get_variable("w_grouping_ff", [ 1 + self.hparams.max_num_outputs * self.hparams.max_output_size + self.hparams.adj_embed_dim, self.hparams.grouping_hidden_size ]) variable_scope.get_variable( "w_grouping_softmax", [self.hparams.grouping_hidden_size, self.num_groups]) if self.hparams.bi_lstm: variable_scope.get_variable("encoder_lstm_forward", [ self.embedding_size + self.hparams.hidden_size / 2, 2 * self.hparams.hidden_size ]) variable_scope.get_variable("encoder_lstm_backward", [ self.embedding_size + self.hparams.hidden_size / 2, 2 * self.hparams.hidden_size ]) variable_scope.get_variable( "device_embeddings", [self.num_devices, self.hparams.hidden_size]) variable_scope.get_variable( "decoder_lstm", [2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size]) variable_scope.get_variable( "device_softmax", [2 * self.hparams.hidden_size, self.num_devices]) variable_scope.get_variable("device_go_embedding", [1, self.hparams.hidden_size]) variable_scope.get_variable( "encoder_forget_bias", shape=1, dtype=dtypes.float32, initializer=init_ops.constant_initializer( self.hparams.forget_bias_init)) variable_scope.get_variable( "decoder_forget_bias", shape=1, dtype=dtypes.float32, 
initializer=init_ops.constant_initializer( self.hparams.forget_bias_init)) variable_scope.get_variable( "attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size]) variable_scope.get_variable( "attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size]) variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1]) else: variable_scope.get_variable("encoder_lstm", [ self.embedding_size + self.hparams.hidden_size, 4 * self.hparams.hidden_size ]) variable_scope.get_variable( "device_embeddings", [self.num_devices, self.hparams.hidden_size]) variable_scope.get_variable( "decoder_lstm", [2 * self.hparams.hidden_size, 4 * self.hparams.hidden_size]) variable_scope.get_variable( "device_softmax", [2 * self.hparams.hidden_size, self.num_devices]) variable_scope.get_variable("device_go_embedding", [1, self.hparams.hidden_size]) variable_scope.get_variable( "encoder_forget_bias", shape=1, dtype=dtypes.float32, initializer=init_ops.constant_initializer( self.hparams.forget_bias_init)) variable_scope.get_variable( "decoder_forget_bias", shape=1, dtype=dtypes.float32, initializer=init_ops.constant_initializer( self.hparams.forget_bias_init)) variable_scope.get_variable( "attn_w_1", [self.hparams.hidden_size, self.hparams.hidden_size]) variable_scope.get_variable( "attn_w_2", [self.hparams.hidden_size, self.hparams.hidden_size]) variable_scope.get_variable("attn_v", [self.hparams.hidden_size, 1]) seq2seq_input_layer = array_ops.placeholder_with_default( array_ops.zeros([self.hparams.num_children, self.num_groups, self.group_emb_size], dtypes.float32), shape=(self.hparams.num_children, self.num_groups, self.group_emb_size)) self.seq2seq_input_layer = seq2seq_input_layer def compute_reward(self, run_time): if self.hparams.reward_function == "id": reward = run_time elif self.hparams.reward_function == "sqrt": reward = math.sqrt(run_time) elif self.hparams.reward_function == "log": reward = math.log1p(run_time) else: raise NotImplementedError( "Unrecognized reward function '%s', consider your " "--reward_function flag value." % self.hparams.reward_function) return reward def build_controller(self): """RL optimization interface. Returns: ops: A dictionary holding handles of the model used for training. 
""" self._global_step = training_util.get_or_create_global_step() ops = {} ops["loss"] = 0 failing_signal = self.compute_reward(self.hparams.failing_signal) ctr = {} with tf_ops.name_scope("controller_{}".format(self.ctrl_id)): with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)): ctr["reward"] = {"value": [], "ph": [], "update": []} ctr["ready"] = {"value": [], "ph": [], "update": []} ctr["best_reward"] = {"value": [], "update": []} for i in range(self.hparams.num_children): reward_value = variable_scope.get_local_variable( "reward_{}".format(i), initializer=0.0, dtype=dtypes.float32, trainable=False) reward_ph = array_ops.placeholder( dtypes.float32, shape=(), name="reward_ph_{}".format(i)) reward_update = state_ops.assign( reward_value, reward_ph, use_locking=True) ctr["reward"]["value"].append(reward_value) ctr["reward"]["ph"].append(reward_ph) ctr["reward"]["update"].append(reward_update) best_reward = variable_scope.get_local_variable( "best_reward_{}".format(i), initializer=failing_signal, dtype=dtypes.float32, trainable=False) ctr["best_reward"]["value"].append(best_reward) ctr["best_reward"]["update"].append( state_ops.assign(best_reward, math_ops.minimum(best_reward, reward_update))) ready_value = variable_scope.get_local_variable( "ready_{}".format(i), initializer=True, dtype=dtypes.bool, trainable=False) ready_ph = array_ops.placeholder( dtypes.bool, shape=(), name="ready_ph_{}".format(i)) ready_update = state_ops.assign( ready_value, ready_ph, use_locking=True) ctr["ready"]["value"].append(ready_value) ctr["ready"]["ph"].append(ready_ph) ctr["ready"]["update"].append(ready_update) ctr["grouping_y_preds"], ctr["grouping_log_probs"] = self.get_groupings() summary.histogram( "grouping_actions", array_ops.slice(ctr["grouping_y_preds"]["sample"], [0, 0], [1, array_ops.shape(self.op_embeddings)[0]])) with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)): ctr["baseline"] = variable_scope.get_local_variable( "baseline", initializer=failing_signal if self.hparams.start_with_failing_signal else 0.0, dtype=dtypes.float32, trainable=False) new_baseline = self.hparams.bl_dec * ctr["baseline"] + ( 1 - self.hparams.bl_dec) * math_ops.reduce_mean( ctr["reward"]["value"]) if not self.hparams.always_update_baseline: baseline_mask = math_ops.less(ctr["reward"]["value"], failing_signal) selected_reward = array_ops.boolean_mask(ctr["reward"]["value"], baseline_mask) selected_baseline = control_flow_ops.cond( math_ops.reduce_any(baseline_mask), lambda: math_ops.reduce_mean(selected_reward), lambda: constant_op.constant(0, dtype=dtypes.float32)) ctr["pos_reward"] = selected_baseline pos_ = math_ops.less( constant_op.constant(0, dtype=dtypes.float32), selected_baseline) selected_baseline = self.hparams.bl_dec * ctr["baseline"] + ( 1 - self.hparams.bl_dec) * selected_baseline selected_baseline = control_flow_ops.cond( pos_, lambda: selected_baseline, lambda: ctr["baseline"]) new_baseline = control_flow_ops.cond( math_ops.less(self.global_step, self.hparams.stop_updating_after_steps), lambda: new_baseline, lambda: selected_baseline) ctr["baseline_update"] = state_ops.assign( ctr["baseline"], new_baseline, use_locking=True) ctr["y_preds"], ctr["log_probs"] = self.get_placements() summary.histogram("actions", ctr["y_preds"]["sample"]) mask = math_ops.less(ctr["reward"]["value"], failing_signal) ctr["loss"] = ctr["reward"]["value"] - ctr["baseline"] ctr["loss"] *= ( ctr["log_probs"]["sample"] + ctr["grouping_log_probs"]["sample"]) selected_loss = 
array_ops.boolean_mask(ctr["loss"], mask)
      selected_loss = control_flow_ops.cond(
          math_ops.reduce_any(mask),
          lambda: math_ops.reduce_mean(-selected_loss),
          lambda: constant_op.constant(0, dtype=dtypes.float32))
      ctr["loss"] = control_flow_ops.cond(
          math_ops.less(self.global_step,
                        self.hparams.stop_updating_after_steps),
          lambda: math_ops.reduce_mean(-ctr["loss"]), lambda: selected_loss)

      ctr["reward_s"] = math_ops.reduce_mean(ctr["reward"]["value"])
      summary.scalar("loss", ctr["loss"])
      summary.scalar("avg_reward", ctr["reward_s"])
      summary.scalar("best_reward_so_far", best_reward)
      summary.scalar(
          "advantage",
          math_ops.reduce_mean(ctr["reward"]["value"] - ctr["baseline"]))

    with variable_scope.variable_scope(
        "optimizer", reuse=variable_scope.AUTO_REUSE):
      (ctr["train_op"], ctr["lr"], ctr["grad_norm"],
       ctr["grad_norms"]) = self._get_train_ops(
           ctr["loss"],
           tf_ops.get_collection(tf_ops.GraphKeys.TRAINABLE_VARIABLES),
           self.global_step,
           grad_bound=self.hparams.grad_bound,
           lr_init=self.hparams.lr,
           lr_dec=self.hparams.lr_dec,
           start_decay_step=self.hparams.start_decay_step,
           decay_steps=self.hparams.decay_steps,
           optimizer_type=self.hparams.optimizer_type)

    summary.scalar("gradnorm", ctr["grad_norm"])
    summary.scalar("lr", ctr["lr"])
    ctr["summary"] = summary.merge_all()
    ops["controller"] = ctr

    self.ops = ops
    return ops

  @property
  def global_step(self):
    return self._global_step

  def create_op_embeddings(self, verbose=False):
    if verbose:
      print("process input graph for op embeddings")
    self.num_ops = len(self.important_ops)
    # topological sort of important nodes
    topo_order = [op.name for op in self.important_ops]

    # create index to name for topologically sorted important nodes
    name_to_topo_order_index = {}
    for idx, x in enumerate(topo_order):
      name_to_topo_order_index[x] = idx
    self.name_to_topo_order_index = name_to_topo_order_index

    # create adj matrix
    adj_dict = {}
    for idx, op in enumerate(self.important_ops):
      for output_op in self.get_node_fanout(op):
        output_op_name = output_op.name
        if output_op_name in self.important_op_names:
          if name_to_topo_order_index[op.name] not in adj_dict:
            adj_dict[name_to_topo_order_index[op.name]] = []
          adj_dict[name_to_topo_order_index[op.name]].extend(
              [name_to_topo_order_index[output_op_name], 1])
          if output_op_name not in adj_dict:
            adj_dict[name_to_topo_order_index[output_op_name]] = []
          adj_dict[name_to_topo_order_index[output_op_name]].extend(
              [name_to_topo_order_index[op.name], -1])

    # get op_type, op_output_shape, and adj info
    output_embed_dim = (self.hparams.max_num_outputs *
                        self.hparams.max_output_size)

    # TODO(bsteiner): don't filter based on used ops so that we can generalize
    # to models that use other types of ops.
used_ops = set() for node in self.important_ops: op_type = str(node.op) used_ops.add(op_type) self.type_dict = {} for op_type in self.cluster.ListAvailableOps(): if op_type in used_ops: self.type_dict[op_type] = len(self.type_dict) op_types = np.zeros([self.num_ops], dtype=np.int32) op_output_shapes = np.full( [self.num_ops, output_embed_dim], -1.0, dtype=np.float32) for idx, node in enumerate(self.important_ops): op_types[idx] = self.type_dict[node.op] # output shape op_name = node.name for i, output_prop in enumerate(self.node_properties[op_name]): if output_prop.shape.__str__() == "<unknown>": continue shape = output_prop.shape for j, dim in enumerate(shape.dim): if dim.size >= 0: if i * self.hparams.max_output_size + j >= output_embed_dim: break op_output_shapes[idx, i * self.hparams.max_output_size + j] = dim.size # adj for padding op_adj = np.full( [self.num_ops, self.hparams.adj_embed_dim], 0, dtype=np.float32) for idx in adj_dict: neighbors = adj_dict[int(idx)] min_dim = min(self.hparams.adj_embed_dim, len(neighbors)) padding_size = self.hparams.adj_embed_dim - min_dim neighbors = neighbors[:min_dim] + [0] * padding_size op_adj[int(idx)] = neighbors # op_embedding starts here op_embeddings = np.zeros( [ self.num_ops, 1 + self.hparams.max_num_outputs * self.hparams.max_output_size + self.hparams.adj_embed_dim ], dtype=np.float32) for idx, op_name in enumerate(topo_order): op_embeddings[idx] = np.concatenate( (np.array([op_types[idx]]), op_output_shapes[idx], op_adj[int(idx)])) self.op_embeddings = constant_op.constant( op_embeddings, dtype=dtypes.float32) if verbose: print("num_ops = {}".format(self.num_ops)) print("num_types = {}".format(len(self.type_dict))) def get_groupings(self, *args, **kwargs): num_children = self.hparams.num_children with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)): grouping_actions_cache = variable_scope.get_local_variable( "grouping_actions_cache", initializer=init_ops.zeros_initializer, dtype=dtypes.int32, shape=[num_children, self.num_ops], trainable=False) input_layer = self.op_embeddings input_layer = array_ops.expand_dims(input_layer, 0) feed_ff_input_layer = array_ops.tile(input_layer, [num_children, 1, 1]) grouping_actions, grouping_log_probs = {}, {} grouping_actions["sample"], grouping_log_probs[ "sample"] = self.make_grouping_predictions(feed_ff_input_layer) grouping_actions["sample"] = state_ops.assign(grouping_actions_cache, grouping_actions["sample"]) self.grouping_actions_cache = grouping_actions_cache return grouping_actions, grouping_log_probs def make_grouping_predictions(self, input_layer, reuse=None): """model that predicts grouping (grouping_actions). 
Args:
      input_layer: the group input layer.
      reuse: whether to reuse variables in the scope.

    Returns:
      grouping_actions: actions
      grouping_log_probs: log probabilities corresponding to actions
    """
    with variable_scope.variable_scope(self.hparams.name, reuse=True):
      # input_layer: tensor of size [1, num_ops, hidden_size]
      w_grouping_ff = variable_scope.get_variable("w_grouping_ff")
      w_grouping_softmax = variable_scope.get_variable("w_grouping_softmax")

    batch_size = array_ops.shape(input_layer)[0]
    embedding_dim = array_ops.shape(input_layer)[2]

    reshaped = array_ops.reshape(input_layer,
                                 [batch_size * self.num_ops, embedding_dim])
    ff_output = math_ops.matmul(reshaped, w_grouping_ff)
    logits = math_ops.matmul(ff_output, w_grouping_softmax)
    if self.hparams.logits_std_noise > 0:
      num_in_logits = math_ops.cast(
          array_ops.size(logits), dtype=dtypes.float32)
      avg_norm = math_ops.divide(
          linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
      logits_noise = random_ops.random_normal(
          array_ops.shape(logits),
          stddev=self.hparams.logits_std_noise * avg_norm)
      logits = control_flow_ops.cond(
          self.global_step > self.hparams.stop_noise_step, lambda: logits,
          lambda: logits + logits_noise)
    logits = array_ops.reshape(logits,
                               [batch_size * self.num_ops, self.num_groups])
    actions = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
    actions = math_ops.cast(actions, dtypes.int32)
    actions = array_ops.reshape(actions, [batch_size, self.num_ops])
    action_label = array_ops.reshape(actions, [-1])
    log_probs = nn_ops.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=action_label)
    log_probs = array_ops.reshape(log_probs, [batch_size, -1])
    log_probs = math_ops.reduce_sum(log_probs, 1)
    grouping_actions = actions
    grouping_log_probs = log_probs
    return grouping_actions, grouping_log_probs

  def create_group_embeddings(self, grouping_actions, verbose=False):
    """Approximates the blocks of a TF graph from a graph_def.

    Args:
      grouping_actions: grouping predictions.
      verbose: whether to print debug information.

    Returns:
      groups: stacked group embeddings, one per child.
    """
    groups = [
        self._create_group_embeddings(grouping_actions, i, verbose)
        for i in range(self.hparams.num_children)
    ]
    return np.stack(groups, axis=0)

  def _create_group_embeddings(self, grouping_actions, child_id,
                               verbose=False):
    """Approximates the blocks of a TF graph from a graph_def for each child.

    Args:
      grouping_actions: grouping predictions.
      child_id: child_id for the group.
      verbose: whether to print debug information.

    Returns:
      groups: group embedding for the child_id.
    """
    if verbose:
      print("Processing input_graph")

    # TODO(azalia): Build inter-adjacencies dag matrix.
# record dag_matrix dag_matrix = np.zeros([self.num_groups, self.num_groups], dtype=np.float32) for op in self.important_ops: topo_op_index = self.name_to_topo_order_index[op.name] group_index = grouping_actions[child_id][topo_op_index] for output_op in self.get_node_fanout(op): if output_op.name not in self.important_op_names: continue output_group_index = ( grouping_actions[child_id][self.name_to_topo_order_index[ output_op.name]]) dag_matrix[group_index, output_group_index] += 1.0 num_connections = np.sum(dag_matrix) num_intra_group_connections = dag_matrix.trace() num_inter_group_connections = num_connections - num_intra_group_connections if verbose: print("grouping evaluation metric") print(("num_connections={} num_intra_group_connections={} " "num_inter_group_connections={}").format( num_connections, num_intra_group_connections, num_inter_group_connections)) self.dag_matrix = dag_matrix # output_shape op_output_shapes = np.zeros( [ len(self.important_ops), self.hparams.max_num_outputs * self.hparams.max_output_size ], dtype=np.float32) for idx, op in enumerate(self.important_ops): for i, output_properties in enumerate(self.node_properties[op.name]): if output_properties.shape.__str__() == "<unknown>": continue if i > self.hparams.max_num_outputs: break shape = output_properties.shape for j, dim in enumerate(shape.dim): if dim.size > 0: k = i * self.hparams.max_output_size + j if k >= self.hparams.max_num_outputs * self.hparams.max_output_size: break op_output_shapes[idx, k] = dim.size # group_embedding group_embedding = np.zeros( [ self.num_groups, len(self.type_dict) + self.hparams.max_num_outputs * self.hparams.max_output_size ], dtype=np.float32) for op_index, op in enumerate(self.important_ops): group_index = grouping_actions[child_id][ self.name_to_topo_order_index[op.name]] type_name = str(op.op) type_index = self.type_dict[type_name] group_embedding[group_index, type_index] += 1 group_embedding[group_index, :self.hparams.max_num_outputs * self.hparams. max_output_size] += ( op_output_shapes[op_index]) grouping_adjacencies = np.concatenate( [dag_matrix, np.transpose(dag_matrix)], axis=1) group_embedding = np.concatenate( [grouping_adjacencies, group_embedding], axis=1) group_normalizer = np.amax(group_embedding, axis=1, keepdims=True) group_embedding /= (group_normalizer + 1.0) if verbose: print("Finished Processing Input Graph") return group_embedding def get_placements(self, *args, **kwargs): num_children = self.hparams.num_children with variable_scope.variable_scope("controller_{}".format(self.ctrl_id)): actions_cache = variable_scope.get_local_variable( "actions_cache", initializer=init_ops.zeros_initializer, dtype=dtypes.int32, shape=[num_children, self.num_groups], trainable=False) x = self.seq2seq_input_layer last_c, last_h, attn_mem = self.encode(x) actions, log_probs = {}, {} actions["sample"], log_probs["sample"] = ( self.decode( x, last_c, last_h, attn_mem, mode="sample")) actions["target"], log_probs["target"] = ( self.decode( x, last_c, last_h, attn_mem, mode="target", y=actions_cache)) actions["greedy"], log_probs["greedy"] = ( self.decode( x, last_c, last_h, attn_mem, mode="greedy")) actions["sample"] = control_flow_ops.cond( self.global_step < self.hparams.stop_sampling, lambda: state_ops.assign(actions_cache, actions["sample"]), lambda: state_ops.assign(actions_cache, actions["target"])) self.actions_cache = actions_cache return actions, log_probs def encode(self, x): """Encoder using LSTM. 
Args: x: tensor of size [num_children, num_groups, embedding_size] Returns: last_c, last_h: tensors of size [num_children, hidden_size], the final LSTM states attn_mem: tensor of size [num_children, num_groups, hidden_size], the attention memory, i.e. concatenation of all hidden states, linearly transformed by an attention matrix attn_w_1 """ if self.hparams.bi_lstm: with variable_scope.variable_scope(self.hparams.name, reuse=True): w_lstm_forward = variable_scope.get_variable("encoder_lstm_forward") w_lstm_backward = variable_scope.get_variable("encoder_lstm_backward") forget_bias = variable_scope.get_variable("encoder_forget_bias") attn_w_1 = variable_scope.get_variable("attn_w_1") else: with variable_scope.variable_scope(self.hparams.name, reuse=True): w_lstm = variable_scope.get_variable("encoder_lstm") forget_bias = variable_scope.get_variable("encoder_forget_bias") attn_w_1 = variable_scope.get_variable("attn_w_1") embedding_size = array_ops.shape(x)[2] signals = array_ops.split(x, self.num_groups, axis=1) for i in range(len(signals)): signals[i] = array_ops.reshape( signals[i], [self.hparams.num_children, embedding_size]) if self.hparams.bi_lstm: def body(i, prev_c_forward, prev_h_forward, prev_c_backward, prev_h_backward): """while loop for LSTM.""" signal_forward = signals[i] next_c_forward, next_h_forward = lstm(signal_forward, prev_c_forward, prev_h_forward, w_lstm_forward, forget_bias) signal_backward = signals[self.num_groups - 1 - i] next_c_backward, next_h_backward = lstm( signal_backward, prev_c_backward, prev_h_backward, w_lstm_backward, forget_bias) next_h = array_ops.concat([next_h_forward, next_h_backward], axis=1) all_h.append(next_h) return (next_c_forward, next_h_forward, next_c_backward, next_h_backward) c_forward = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size / 2], dtype=dtypes.float32) h_forward = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size / 2], dtype=dtypes.float32) c_backward = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size / 2], dtype=dtypes.float32) h_backward = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size / 2], dtype=dtypes.float32) all_h = [] for i in range(0, self.num_groups): c_forward, h_forward, c_backward, h_backward = body( i, c_forward, h_forward, c_backward, h_backward) last_c = array_ops.concat([c_forward, c_backward], axis=1) last_h = array_ops.concat([h_forward, h_backward], axis=1) attn_mem = array_ops.stack(all_h) else: def body(i, prev_c, prev_h): signal = signals[i] next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias) all_h.append(next_h) return next_c, next_h c = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size], dtype=dtypes.float32) h = array_ops.zeros( [self.hparams.num_children, self.hparams.hidden_size], dtype=dtypes.float32) all_h = [] for i in range(0, self.num_groups): c, h = body(i, c, h) last_c = c last_h = h attn_mem = array_ops.stack(all_h) attn_mem = array_ops.transpose(attn_mem, [1, 0, 2]) attn_mem = array_ops.reshape( attn_mem, [self.hparams.num_children * self.num_groups, self.hparams.hidden_size]) attn_mem = math_ops.matmul(attn_mem, attn_w_1) attn_mem = array_ops.reshape( attn_mem, [self.hparams.num_children, self.num_groups, self.hparams.hidden_size]) return last_c, last_h, attn_mem def decode(self, x, last_c, last_h, attn_mem, mode="target", y=None): """Decoder using LSTM. Args: x: tensor of size [num_children, num_groups, embedding_size]. 
last_c: tensor of size [num_children, hidden_size], the final LSTM
        states computed by self.encoder.
      last_h: same as last_c.
      attn_mem: tensor of size [num_children, num_groups, hidden_size].
      mode: "target", "sample", or "greedy".
      y: tensor of size [num_children, num_groups], the device placements.

    Returns:
      actions: tensor of size [num_children, num_groups], the placements of
        devices
      log_probs: log probabilities corresponding to the actions
    """
    with variable_scope.variable_scope(self.hparams.name, reuse=True):
      w_lstm = variable_scope.get_variable("decoder_lstm")
      forget_bias = variable_scope.get_variable("decoder_forget_bias")
      device_embeddings = variable_scope.get_variable("device_embeddings")
      device_softmax = variable_scope.get_variable("device_softmax")
      device_go_embedding = variable_scope.get_variable("device_go_embedding")
      attn_w_2 = variable_scope.get_variable("attn_w_2")
      attn_v = variable_scope.get_variable("attn_v")

    actions = tensor_array_ops.TensorArray(
        dtypes.int32,
        size=self.num_groups,
        infer_shape=False,
        clear_after_read=False)

    # pylint: disable=unused-argument
    def condition(i, *args):
      return math_ops.less(i, self.num_groups)

    # pylint: disable=missing-docstring
    def body(i, prev_c, prev_h, actions, log_probs):
      # pylint: disable=g-long-lambda
      signal = control_flow_ops.cond(
          math_ops.equal(i, 0),
          lambda: array_ops.tile(device_go_embedding,
                                 [self.hparams.num_children, 1]),
          lambda: embedding_ops.embedding_lookup(device_embeddings,
                                                 actions.read(i - 1))
      )
      if self.hparams.keep_prob is not None:
        signal = nn_ops.dropout(signal, self.hparams.keep_prob)
      next_c, next_h = lstm(signal, prev_c, prev_h, w_lstm, forget_bias)
      query = math_ops.matmul(next_h, attn_w_2)
      query = array_ops.reshape(
          query, [self.hparams.num_children, 1, self.hparams.hidden_size])
      query = math_ops.tanh(query + attn_mem)
      query = array_ops.reshape(query, [
          self.hparams.num_children * self.num_groups,
          self.hparams.hidden_size
      ])
      query = math_ops.matmul(query, attn_v)
      query = array_ops.reshape(query,
                                [self.hparams.num_children, self.num_groups])
      query = nn_ops.softmax(query)
      query = array_ops.reshape(
          query, [self.hparams.num_children, self.num_groups, 1])
      query = math_ops.reduce_sum(attn_mem * query, axis=1)
      query = array_ops.concat([next_h, query], axis=1)
      logits = math_ops.matmul(query, device_softmax)
      logits /= self.hparams.temperature
      if self.hparams.tanh_constant > 0:
        logits = math_ops.tanh(logits) * self.hparams.tanh_constant
      if self.hparams.logits_std_noise > 0:
        num_in_logits = math_ops.cast(
            array_ops.size(logits), dtype=dtypes.float32)
        avg_norm = math_ops.divide(
            linalg_ops.norm(logits), math_ops.sqrt(num_in_logits))
        logits_noise = random_ops.random_normal(
            array_ops.shape(logits),
            stddev=self.hparams.logits_std_noise * avg_norm)
        logits = control_flow_ops.cond(
            self.global_step > self.hparams.stop_noise_step, lambda: logits,
            lambda: logits + logits_noise)

      if mode == "sample":
        next_y = random_ops.multinomial(logits, 1, seed=self.hparams.seed)
      elif mode == "greedy":
        next_y = math_ops.argmax(logits, 1)
      elif mode == "target":
        next_y = array_ops.slice(y, [0, i], [-1, 1])
      else:
        raise NotImplementedError
      next_y = math_ops.cast(next_y, dtypes.int32)
      next_y = array_ops.reshape(next_y, [self.hparams.num_children])
      actions = actions.write(i, next_y)
      log_probs += nn_ops.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=next_y)
      return i + 1, next_c, next_h, actions, log_probs

    loop_vars = [
        constant_op.constant(0, dtype=dtypes.int32), last_c, last_h, actions,
        array_ops.zeros([self.hparams.num_children], dtype=dtypes.float32)
    ]
    loop_outputs = control_flow_ops.while_loop(condition,
body, loop_vars) last_c = loop_outputs[-4] last_h = loop_outputs[-3] actions = loop_outputs[-2].stack() actions = array_ops.transpose(actions, [1, 0]) log_probs = loop_outputs[-1] return actions, log_probs def eval_placement(self, sess, child_id=0, verbose=False): grouping_actions, actions = sess.run([ self.grouping_actions_cache, self.actions_cache ]) grouping_actions = grouping_actions[child_id] actions = actions[child_id] if verbose: global_step = sess.run(self.global_step) if global_step % 100 == 0: log_string = "op group assignments: " for a in grouping_actions: log_string += "{} ".format(a) print(log_string[:-1]) log_string = "group device assignments: " for a in actions: log_string += "{} ".format(a) print(log_string[:-1]) for op in self.important_ops: topo_order_index = self.name_to_topo_order_index[op.name] group_index = grouping_actions[topo_order_index] op.device = self.devices[actions[group_index]].name try: _, run_time, _ = self.cluster.MeasureCosts(self.item) except errors.ResourceExhaustedError: run_time = self.hparams.failing_signal return run_time def update_reward(self, sess, run_time, child_id=0, verbose=False): reward = self.compute_reward(run_time) controller_ops = self.ops["controller"] _, best_reward = sess.run( [ controller_ops["reward"]["update"][child_id], controller_ops["best_reward"]["update"][child_id] ], feed_dict={ controller_ops["reward"]["ph"][child_id]: reward, }) if verbose: print(("run_time={:<.5f} reward={:<.5f} " "best_reward={:<.5f}").format(run_time, reward, best_reward)) # Reward is a double, best_reward a float: allow for some slack in the # comparison. updated = abs(best_reward - reward) < 1e-6 return updated def generate_grouping(self, sess): controller_ops = self.ops["controller"] grouping_actions = sess.run(controller_ops["grouping_y_preds"]["sample"]) return grouping_actions def generate_placement(self, grouping, sess): controller_ops = self.ops["controller"] feed_seq2seq_input_dict = {} feed_seq2seq_input_dict[self.seq2seq_input_layer] = grouping sess.run( controller_ops["y_preds"]["sample"], feed_dict=feed_seq2seq_input_dict) def process_reward(self, sess): controller_ops = self.ops["controller"] run_ops = [ controller_ops["loss"], controller_ops["lr"], controller_ops["grad_norm"], controller_ops["grad_norms"], controller_ops["train_op"] ] sess.run(run_ops) sess.run(controller_ops["baseline_update"]) def _get_train_ops(self, loss, tf_variables, global_step, grad_bound=1.25, lr_init=1e-3, lr_dec=0.9, start_decay_step=10000, decay_steps=100, optimizer_type="adam"): """Loss optimizer. 
Args:
      loss: scalar tf tensor
      tf_variables: list of training variables, typically
        tf.compat.v1.trainable_variables()
      global_step: global_step
      grad_bound: max gradient norm
      lr_init: initial learning rate
      lr_dec: learning rate decay coefficient
      start_decay_step: start decaying learning rate after this many steps
      decay_steps: apply decay rate factor at this step intervals
      optimizer_type: optimizer type should be either adam or sgd

    Returns:
      train_op: training op
      learning_rate: scalar learning rate tensor
      grad_norm: l2 norm of the gradient vector
      all_grad_norms: l2 norm of each component
    """
    lr_gstep = global_step - start_decay_step

    def f1():
      return constant_op.constant(lr_init)

    def f2():
      return learning_rate_decay.exponential_decay(lr_init, lr_gstep,
                                                   decay_steps, lr_dec, True)

    learning_rate = control_flow_ops.cond(
        math_ops.less(global_step, start_decay_step),
        f1,
        f2,
        name="learning_rate")

    if optimizer_type == "adam":
      opt = adam.AdamOptimizer(learning_rate)
    elif optimizer_type == "sgd":
      opt = gradient_descent.GradientDescentOptimizer(learning_rate)
    else:
      raise ValueError("Unknown optimizer_type: %s" % optimizer_type)
    grads_and_vars = opt.compute_gradients(loss, tf_variables)
    grad_norm = clip_ops.global_norm([g for g, v in grads_and_vars])
    all_grad_norms = {}
    clipped_grads = []
    clipped_rate = math_ops.maximum(grad_norm / grad_bound, 1.0)
    for g, v in grads_and_vars:
      if g is not None:
        if isinstance(g, tf_ops.IndexedSlices):
          clipped = g.values / clipped_rate
          norm_square = math_ops.reduce_sum(clipped * clipped)
          clipped = tf_ops.IndexedSlices(clipped, g.indices)
        else:
          clipped = g / clipped_rate
          norm_square = math_ops.reduce_sum(clipped * clipped)
        all_grad_norms[v.name] = math_ops.sqrt(norm_square)
        clipped_grads.append((clipped, v))

    train_op = opt.apply_gradients(clipped_grads, global_step)
    return train_op, learning_rate, grad_norm, all_grad_norms


def lstm(x, prev_c, prev_h, w_lstm, forget_bias):
  """LSTM cell.

  Args:
    x: tensors of size [num_children, hidden_size].
    prev_c: tensors of size [num_children, hidden_size].
    prev_h: same as prev_c.
    w_lstm: weight matrix of size [2 * hidden_size, 4 * hidden_size].
    forget_bias: scalar bias added to the forget gate before the sigmoid.

  Returns:
    next_c: next cell state, same shape as prev_c.
    next_h: next hidden state, same shape as prev_h.
  """
  ifog = math_ops.matmul(array_ops.concat([x, prev_h], axis=1), w_lstm)
  i, f, o, g = array_ops.split(ifog, 4, axis=1)
  i = math_ops.sigmoid(i)
  f = math_ops.sigmoid(f + forget_bias)
  o = math_ops.sigmoid(o)
  g = math_ops.tanh(g)
  next_c = i * g + f * prev_c
  next_h = o * math_ops.tanh(next_c)
  return next_c, next_h
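
# --- Illustrative sketch (not part of the original module) ---
# A minimal NumPy mirror of the lstm() helper above, handy for sanity-checking
# the gate math and shapes outside a TF session. It assumes the same
# [i, f, o, g] gate ordering and a scalar forget bias; because x and prev_h
# are concatenated, w_lstm must have shape [2 * hidden_size, 4 * hidden_size].
# The helper name is hypothetical, and it relies on the module-level
# numpy-as-np import used elsewhere in this file.
def _np_lstm_reference(x, prev_c, prev_h, w_lstm, forget_bias):
  """Pure-NumPy re-implementation of lstm() for shape and logic checks."""
  ifog = np.concatenate([x, prev_h], axis=1).dot(w_lstm)
  i, f, o, g = np.split(ifog, 4, axis=1)
  sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
  next_c = sigmoid(i) * np.tanh(g) + sigmoid(f + forget_bias) * prev_c
  next_h = sigmoid(o) * np.tanh(next_c)
  return next_c, next_h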
tensorflow-master
tensorflow/python/grappler/hierarchical_controller.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the swig wrapper tf_optimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.grappler import tf_optimizer from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import training as train class MemoryOptimizerSwapTest(test.TestCase): """Tests the Grappler memory optimizer.""" @test_util.run_deprecated_v1 def testNoSwapping(self): """Make sure the graph is preserved when there is nothing to swap.""" a = variables.VariableV1(10, name='a') b = variables.VariableV1(20, name='b') c = math_ops.add_n([a, b], name='c') d = math_ops.add_n([b, c], name='d') train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) graph_size = len(mg.graph_def.node) nodes = [node.name for node in mg.graph_def.node] config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, constant_folding=rewriter_config_pb2.RewriterConfig.OFF, memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)) graph = tf_optimizer.OptimizeGraph(config, mg) self.assertEqual(len(graph.node), graph_size) self.assertItemsEqual([node.name for node in graph.node], nodes) @test_util.run_v1_only('b/120545219') def testSimpleSwap(self): """Check that the swap annotations are followed.""" with ops.device('/gpu:0'): a = variables.VariableV1(10, name='a') b = variables.VariableV1(20, name='b') c = math_ops.add_n([a, b], name='c') d = math_ops.add_n([b, c], name='d') train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) d.op._set_attr('_swap_to_host', attr_value_pb2.AttrValue(i=0)) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) graph_size = len(mg.graph_def.node) config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE, constant_folding=rewriter_config_pb2.RewriterConfig.OFF, memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL, min_graph_nodes=-1)) graph = tf_optimizer.OptimizeGraph(config, mg) 
self.assertEqual(len(graph.node), graph_size + 2) self.assertTrue( set([node.name for node in graph.node]) > set( ['a', 'b', 'c', 'd', 'swap_in_d_0', 'swap_out_d_0'])) for node in graph.node: if node.name == 'swap_in_d_0': self.assertEqual('swap_out_d_0', node.input[0]) self.assertEqual('^b/read', node.input[1]) elif node.name == 'swap_out_d_0': self.assertEqual('b/read', node.input[0]) elif node.name == 'd': self.assertEqual('swap_in_d_0', node.input[0]) self.assertEqual('c', node.input[1]) class MemoryOptimizerRecomputeTest(test.TestCase): """Tests the Python interface to recomputation rewrites. See core/grappler/optimizers/memory_optimizer_test.cc for functional tests. """ def _GetMetaGraph(self, batch_size=14, image_dim=12, optimizer_scope_name=''): """A simple layered graph with conv, an intermediate op, and a ReLU.""" graph = ops.Graph() with graph.as_default(): random_seed.set_random_seed(1) current_activation = variable_scope.get_variable( name='start', shape=[batch_size, image_dim, image_dim, 5]) conv_filter = variable_scope.get_variable( name='filter', shape=[5, 5, 5, 5]) for layer_number in range(10): with variable_scope.variable_scope('layer_{}'.format(layer_number)): after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1], 'SAME') current_activation = 2. * after_conv current_activation = nn.relu(current_activation) loss = math_ops.reduce_mean(current_activation) with ops.name_scope(optimizer_scope_name): optimizer = train.AdamOptimizer(0.001) train_op = optimizer.minimize(loss) init_op = variables.global_variables_initializer() metagraph = train.export_meta_graph() return (metagraph, init_op.name, train_op.name, loss.name) def testRewritingDefaultGradientNames(self): """Tests that rewriting occurs with default gradient names.""" (original_metagraph, _, _, _) = self._GetMetaGraph() config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, constant_folding=rewriter_config_pb2.RewriterConfig.OFF, dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF, layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF, arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF, min_graph_nodes=-1, memory_optimization=( rewriter_config_pb2.RewriterConfig.RECOMPUTATION_HEURISTICS))) rewritten_graph_def = tf_optimizer.OptimizeGraph(config, original_metagraph) self.assertGreater( len(rewritten_graph_def.node), len(original_metagraph.graph_def.node)) self.assertEqual( 0, len([node for node in original_metagraph.graph_def.node if 'Recomputed/' in node.name])) self.assertEqual( 20, # Two per layer len([node for node in rewritten_graph_def.node if 'Recomputed/' in node.name])) def testRewritingNameScopedGradientNames(self): """Tests that rewriting occurs with non-standard gradient names.""" (original_metagraph, _, _, _) = self._GetMetaGraph( optimizer_scope_name='optimizer') config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, constant_folding=rewriter_config_pb2.RewriterConfig.OFF, dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF, layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF, arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF, min_graph_nodes=-1, memory_optimization=rewriter_config_pb2.RewriterConfig .RECOMPUTATION_HEURISTICS, # Checks that name scope "gradients/" also match sub-scope. 
memory_optimizer_target_node_name_scope='gradients/'))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config,
                                                     original_metagraph)
    self.assertGreater(
        len(rewritten_graph_def.node), len(original_metagraph.graph_def.node))
    self.assertEqual(
        0,
        len([node for node in original_metagraph.graph_def.node
             if 'Recomputed/' in node.name]))
    self.assertEqual(
        20,  # Two per layer
        len([node for node in rewritten_graph_def.node
             if 'Recomputed/' in node.name]))

  def testRewritingNameScopedGradientNamesScope(self):
    """Tests that no rewriting occurs when the target scope matches nothing."""
    (original_metagraph, _, _,
     _) = self._GetMetaGraph(optimizer_scope_name='foo/bar')
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            disable_model_pruning=True,
            constant_folding=rewriter_config_pb2.RewriterConfig.OFF,
            dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF,
            arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
            memory_optimization=rewriter_config_pb2.RewriterConfig
            .RECOMPUTATION_HEURISTICS,
            # This should not match anything.
            memory_optimizer_target_node_name_scope='r/gradients/'))
    rewritten_graph_def = tf_optimizer.OptimizeGraph(config,
                                                     original_metagraph)
    self.assertEqual(
        len(rewritten_graph_def.node), len(original_metagraph.graph_def.node))
    self.assertEqual(0,
                     len([
                         node for node in original_metagraph.graph_def.node
                         if 'Recomputed/' in node.name
                     ]))
    self.assertEqual(0,
                     len([
                         node for node in rewritten_graph_def.node
                         if 'Recomputed/' in node.name
                     ]))

  def _GetMemoryOptimizerSessionConfig(self):
    rewrite_options = rewriter_config_pb2.RewriterConfig(
        disable_model_pruning=True,
        memory_optimization=rewriter_config_pb2.RewriterConfig.HEURISTICS)
    graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
    return config_pb2.ConfigProto(graph_options=graph_options)

  def _RunMetaGraphWithConfig(
      self, config, metagraph, init_op_name, train_op_name, loss_op_name):
    graph = ops.Graph()
    with graph.as_default():
      train.import_meta_graph(metagraph)
      init_op = graph.get_operation_by_name(init_op_name)
      train_op = graph.get_operation_by_name(train_op_name)
      loss_op = graph.get_tensor_by_name(loss_op_name)
      with session.Session(config=config, graph=graph) as sess:
        self.evaluate(init_op)
        self.evaluate(train_op)
        self.evaluate(train_op)
        return self.evaluate(loss_op)

  def testRecomputationRewritingNoErrors(self):
    """Tests that graph output is not significantly different with rewriting."""
    (original_metagraph, init_op_name, train_op_name,
     loss_op_name) = self._GetMetaGraph()
    original_loss = self._RunMetaGraphWithConfig(
        config=config_pb2.ConfigProto(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    memory_optimized_loss = self._RunMetaGraphWithConfig(
        config=self._GetMemoryOptimizerSessionConfig(),
        metagraph=original_metagraph,
        init_op_name=init_op_name,
        train_op_name=train_op_name,
        loss_op_name=loss_op_name)
    self.assertAllClose(original_loss, memory_optimized_loss, rtol=1e-2)

  def _annotated_graph(self):
    graph = ops.Graph()
    with graph.as_default():
      random_seed.set_random_seed(2)
      current_activation = variable_scope.get_variable(
          name='start', shape=[1, 2, 2, 5])
      conv_filter = variable_scope.get_variable(
          name='filter', shape=[5, 5, 5, 5])
      for layer_number in range(3):
        with variable_scope.variable_scope('layer_{}'.format(layer_number)):
          after_conv = nn.conv2d(current_activation, conv_filter, [1, 1, 1, 1],
                                 'SAME')
current_activation = 2. * after_conv current_activation.op._set_attr( '_recompute_hint', # The value of the attribute does not matter; just that the key # exists in the op's attributes. attr_value_pb2.AttrValue(i=1)) current_activation += 5. current_activation.op._set_attr( '_recompute_hint', attr_value_pb2.AttrValue(i=0)) current_activation = nn.relu(current_activation) current_activation.op._set_attr( '_recompute_hint', attr_value_pb2.AttrValue(i=1)) loss = math_ops.reduce_mean(current_activation) optimizer = train.AdamOptimizer(0.001) train_op = optimizer.minimize(loss) init_op = variables.global_variables_initializer() return graph, init_op, train_op def testHintNoMetaGraph(self): # Closer to expected usage, but does not check that a re-write actually # happens; see testHintDoesRewrite. graph, init_op, train_op = self._annotated_graph() with graph.as_default(): manual_memory_config = rewriter_config_pb2.RewriterConfig( memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL) graph_options = config_pb2.GraphOptions( rewrite_options=manual_memory_config) session_config = config_pb2.ConfigProto(graph_options=graph_options) with session.Session(config=session_config) as sess: self.evaluate(init_op) self.evaluate(train_op) @test_util.run_v1_only('b/120545219') def testHintDoesRewrite(self): graph = self._annotated_graph()[0] with graph.as_default(): metagraph = train.export_meta_graph() self.assertEqual( 0, len([node for node in metagraph.graph_def.node if 'Recomputed/' in node.name])) config = config_pb2.ConfigProto() config.graph_options.rewrite_options.CopyFrom( rewriter_config_pb2.RewriterConfig( min_graph_nodes=-1, memory_optimization=rewriter_config_pb2.RewriterConfig.MANUAL)) rewritten_graph_def = tf_optimizer.OptimizeGraph(config, metagraph) self.assertEqual( 7, len([ node for node in rewritten_graph_def.node if 'Recomputed/' in node.name ])) if __name__ == '__main__': test.main()
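
# --- Illustrative sketch (not part of the original test file) ---
# The session-config pattern the tests above exercise through
# tf_optimizer.OptimizeGraph, collected into one helper: enable the
# recomputation heuristics and set the name scope the rewriter targets.
# The helper name is hypothetical; every proto field used here appears in
# the tests above.
def _recompute_session_config(target_scope='gradients/'):
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      memory_optimization=(
          rewriter_config_pb2.RewriterConfig.RECOMPUTATION_HEURISTICS),
      # The rewriter matches recompute targets by name scope; a custom
      # optimizer scope needs this override (see
      # testRewritingNameScopedGradientNames above).
      memory_optimizer_target_node_name_scope=target_scope)
  graph_options = config_pb2.GraphOptions(rewrite_options=rewrite_options)
  return config_pb2.ConfigProto(graph_options=graph_options)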
tensorflow-master
tensorflow/python/grappler/memory_optimizer_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the cost analyzer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.grappler import cost_analyzer from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam class CostAnalysisTest(test.TestCase): @test_util.run_deprecated_v1 def testBasicCost(self): """Make sure arguments can be passed correctly.""" a = constant_op.constant(10, name="a") b = constant_op.constant(20, name="b") c = math_ops.add_n([a, b], name="c") d = math_ops.add_n([b, c], name="d") train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = cost_analyzer.GenerateCostReport(mg, per_node_report=True) # Check the report headers self.assertTrue(b"Total time measured in ns (serialized):" in report) self.assertTrue(b"Total time measured in ns (actual):" in report) self.assertTrue(b"Total time analytical in ns (upper bound):" in report) self.assertTrue(b"Total time analytical in ns (lower bound):" in report) self.assertTrue(b"Overall efficiency (analytical upper/actual):" in report) self.assertTrue(b"Overall efficiency (analytical lower/actual):" in report) self.assertTrue(b"Below is the per-node report summary:" in report) # Also print the report to make it easier to debug print("{}".format(report)) @test_util.run_deprecated_v1 def testVerbose(self): """Make sure the full report is generated with verbose=True.""" a = constant_op.constant(10, name="a") b = constant_op.constant(20, name="b") c = math_ops.add_n([a, b], name="c") d = math_ops.add_n([b, c], name="d") train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = cost_analyzer.GenerateCostReport( mg, per_node_report=True, verbose=True) # Check the report headers self.assertTrue(b"Below is the full per-node report:" in report) # Also print the report to make it easier to debug print("{}".format(report)) @test_util.run_deprecated_v1 def testSmallNetworkCost(self): image = array_ops.placeholder(dtypes.float32, shape=[1, 28, 28, 1]) label = array_ops.placeholder(dtypes.float32, shape=[1, 10]) w = variables.Variable( random_ops.truncated_normal([5, 5, 1, 
32], stddev=0.1)) b = variables.Variable(random_ops.truncated_normal([32], stddev=0.1)) conv = nn_ops.conv2d(image, w, strides=[1, 1, 1, 1], padding="SAME") h_conv = nn_ops.relu(conv + b) h_conv_flat = array_ops.reshape(h_conv, [1, -1]) w_fc = variables.Variable( random_ops.truncated_normal([25088, 10], stddev=0.1)) b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1)) y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc) cross_entropy = math_ops.reduce_mean( -math_ops.reduce_sum(label * math_ops.log(y_conv), axis=[1])) _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = cost_analyzer.GenerateCostReport(mg) # Print the report to make it easier to debug print("{}".format(report)) self.assertTrue(b"MatMul" in report) self.assertTrue(b"ApplyAdam" in report) self.assertTrue(b"Conv2D" in report) self.assertTrue(b"Conv2DBackpropFilter" in report) self.assertTrue(b"Softmax" in report) for op_type in [b"MatMul", b"Conv2D", b"Conv2DBackpropFilter"]: matcher = re.compile( br"\s+" + op_type + br",\s*(\d+),\s*(\d+),\s*([\d\.eE+-]+)%,\s*" + br"([\d\.eE+-]+)%,\s*(-?\d+),\s*(\d+),", re.MULTILINE) m = matcher.search(report) op_count = int(m.group(1)) # upper = int(m.group(5)) lower = int(m.group(6)) if op_type == b"MatMul": self.assertEqual(3, op_count) else: self.assertEqual(1, op_count) self.assertTrue(0 <= lower) # self.assertTrue(0 < upper) # self.assertTrue(lower <= upper) @test_util.run_deprecated_v1 def testBasicMemory(self): """Make sure arguments can be passed correctly.""" with test_util.device(use_gpu=False): a = constant_op.constant(10, name="a") b = constant_op.constant(20, name="b") c = math_ops.add_n([a, b], name="c") d = math_ops.add_n([b, c], name="d") train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = cost_analyzer.GenerateMemoryReport(mg) # Print the report to make it easier to debug print("{}".format(report)) # Check the report self.assertTrue( "Peak usage for device /job:localhost/replica:0/task:0/device:CPU:0: " "16 bytes" in report) self.assertTrue(" a:0 uses 4 bytes" in report) self.assertTrue(" b:0 uses 4 bytes" in report) self.assertTrue(" c:0 uses 4 bytes" in report) self.assertTrue(" d:0 uses 4 bytes" in report) if __name__ == "__main__": test.main()
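
# --- Illustrative sketch (not part of the original test file) ---
# Minimal standalone usage of the two report generators exercised above. It
# assumes ops have already been added to the default graph and registered in
# the TRAIN_OP collection, as the tests do. The helper name is hypothetical.
def _print_cost_and_memory_reports():
  mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
  # per_node_report adds the per-node summary checked in testBasicCost;
  # verbose=True would additionally emit the full per-node report.
  print(cost_analyzer.GenerateCostReport(mg, per_node_report=True))
  print(cost_analyzer.GenerateMemoryReport(mg))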
tensorflow-master
tensorflow/python/grappler/cost_analyzer_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests the graph placer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import device_properties_pb2 from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops as tf_ops from tensorflow.python.grappler import cluster from tensorflow.python.grappler import graph_placer from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test class GraphPlacerTest(test.TestCase): @staticmethod def _buildMnist(batch_size=128, input_size=256, num_classes=1024, num_layers=10, hidden_size=256, name='mnist'): g = tf_ops.get_default_graph() with g.as_default(): ops = {} x = random_ops.random_uniform( [batch_size, input_size], -0.1, 0.1, dtype=dtypes.float32) for layer_id in range(num_layers): with variable_scope.variable_scope('layer_{}'.format(layer_id)): a = input_size if layer_id == 0 else hidden_size b = hidden_size if layer_id < num_layers - 1 else num_classes w = variable_scope.get_variable('w', [a, b]) x = math_ops.matmul(x, w) x = nn_ops.relu(x) ops['y_preds'] = math_ops.argmax(x, axis=1) train_op = g.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP) train_op.append(ops['y_preds']) return g @staticmethod def _buildCluster(num_cpus=1, num_gpus=1): devices = [] if num_gpus > 0: device_properties = device_properties_pb2.DeviceProperties( type='GPU', vendor='NVidia', model='GeForce GTX TITAN X', frequency=1076, num_cores=24, environment={'architecture': '5.2', 'cuda': '8000', 'cudnn': '6021'}, num_registers=65536, l1_cache_size=24576, l2_cache_size=3145728, shared_memory_size_per_multiprocessor=98304, memory_size=12783648768, bandwidth=336480000) for i in range(num_gpus): devices.append( device_properties_pb2.NamedDevice( properties=device_properties, name='/GPU:' + str(i))) assert num_cpus > 0 device_properties = device_properties_pb2.DeviceProperties( type='CPU', frequency=2000, num_cores=4, l1_cache_size=32768, l2_cache_size=262144, l3_cache_size=12582912) for i in range(num_cpus): devices.append( device_properties_pb2.NamedDevice( properties=device_properties, name='/CPU:' + str(i))) return cluster.Cluster(devices=devices) def testBasic(self): """Place a trivial graph.""" a = constant_op.constant(10, name='a') b = constant_op.constant(20, name='b') c = math_ops.add_n([a, b], name='c') d = math_ops.add_n([b, c], name='d') train_op = tf_ops.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=tf_ops.get_default_graph()) gcluster = cluster.Cluster() placed_mg = graph_placer.PlaceGraph(mg, 
allotted_time=15, cluster=gcluster) self.assertEqual(4, len(placed_mg.graph_def.node)) self.assertItemsEqual([node.name for node in placed_mg.graph_def.node], [node.name for node in mg.graph_def.node]) available_devices = [device.name for device in gcluster.ListDevices()] for node in placed_mg.graph_def.node: # The constant nodes are optimized away before the placer is run, and # therefore won't be placed. self.assertTrue(not node.device or node.device in available_devices) def testMNIST(self): graph = GraphPlacerTest._buildMnist() mg = meta_graph.create_meta_graph_def(graph=graph) gcluster = GraphPlacerTest._buildCluster(num_gpus=1) # Spend 15 seconds trying to optimize the placement of the model. This # should give us enough time to exercise the code, but not enough to find # a good placement, so we'll just check for legality. placed_mg = graph_placer.PlaceGraph(mg, allotted_time=15, cluster=gcluster) self.assertEqual(len(placed_mg.graph_def.node), len(mg.graph_def.node)) self.assertItemsEqual([node.name for node in placed_mg.graph_def.node], [node.name for node in mg.graph_def.node]) available_devices = [device.name for device in gcluster.ListDevices()] for node in placed_mg.graph_def.node: self.assertTrue(not node.device or node.device in available_devices) if __name__ == '__main__': test.main()
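
# --- Illustrative sketch (not part of the original test file) ---
# The minimal PlaceGraph call pattern shared by both tests above: serialize
# the default graph into a MetaGraphDef, build (or default-construct) a
# Cluster, and bound the placement search with allotted_time in seconds. The
# helper name is hypothetical.
def _place_default_graph(allotted_time=15):
  mg = meta_graph.create_meta_graph_def(graph=tf_ops.get_default_graph())
  gcluster = cluster.Cluster()  # default Cluster: the local machine's devices
  return graph_placer.PlaceGraph(
      mg, allotted_time=allotted_time, cluster=gcluster)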
tensorflow-master
tensorflow/python/grappler/graph_placer_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Provides a proper python API for the symbols exported through swig.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow as tf_opt from tensorflow.python.grappler import cluster as gcluster def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None): """Optimize the provided metagraph.""" if not isinstance(config_proto, config_pb2.ConfigProto): raise TypeError('Expected config_proto to be a ConfigProto, saw type %s' % type(config_proto)) if cluster is None: cluster = gcluster.Cluster() ret_from_swig = tf_opt.TF_OptimizeGraph(cluster.tf_cluster, config_proto.SerializeToString(), metagraph.SerializeToString(), verbose, graph_id) if ret_from_swig is None: return None out_graph = graph_pb2.GraphDef().FromString(ret_from_swig) return out_graph
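
# --- Illustrative sketch (not part of this module) ---
# Typical call pattern for OptimizeGraph above: serialize the current default
# graph into a MetaGraphDef and hand it to Grappler with an explicit
# RewriterConfig. The helper name and the choice of rewriter knob are
# assumptions for illustration only.
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops


def _optimize_default_graph():
  config = config_pb2.ConfigProto()
  config.graph_options.rewrite_options.CopyFrom(
      rewriter_config_pb2.RewriterConfig(
          constant_folding=rewriter_config_pb2.RewriterConfig.ON))
  mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
  return OptimizeGraph(config, mg, verbose=False)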
tensorflow-master
tensorflow/python/grappler/tf_optimizer.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the datasets shape inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.grappler import item from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class GrapplerTest(test.TestCase): def testFromTensors(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([1, 3]) }] for test_case in test_cases: with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.from_tensors(test_case['tensor']) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() self.assertEqual(test_case['shape'], op_properties['IteratorGetNext'][0].shape) def testFromTensorSlices(self): test_cases = [{ 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[[1, 2, 3]]]), 'shape': tensor_shape.TensorShape([1, 3]) }] for test_case in test_cases: with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.from_tensor_slices(test_case['tensor']) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() self.assertEqual(test_case['shape'], op_properties['IteratorGetNext'][0].shape) def testFromGenerator(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([1, 3]) }] for test_case in test_cases: def make_generator(tensor): def generator(): yield tensor return generator with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.from_generator( make_generator(test_case['tensor']), dtypes.int64, output_shapes=test_case['shape']) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) 
train_op.append(get_next)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        op_properties = grappler_item.GetOpProperties()
        self.assertEqual(test_case['shape'],
                         op_properties['IteratorGetNext'][0].shape)

  def testRange(self):
    with ops.Graph().as_default() as g:
      dataset = dataset_ops.Dataset.range(42)
      iterator = dataset_ops.make_one_shot_iterator(dataset)
      get_next = iterator.get_next()
      train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
      train_op.append(get_next)
      mg = meta_graph.create_meta_graph_def(graph=g)
      grappler_item = item.Item(mg)
      op_properties = grappler_item.GetOpProperties()
      self.assertEqual(tensor_shape.scalar(),
                       op_properties['IteratorGetNext'][0].shape)

  def _testTransformation(self, fn):
    test_cases = [{
        'tensor': 0,
        'shape': tensor_shape.TensorShape([])
    }, {
        'tensor': np.array([1, 2, 3]),
        'shape': tensor_shape.TensorShape([3])
    }, {
        'tensor': np.array([[1, 2, 3]]),
        'shape': tensor_shape.TensorShape([1, 3])
    }]
    for test_case in test_cases:
      with ops.Graph().as_default() as g:
        dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
        dataset = fn(dataset, test_case['tensor'], test_case['shape'])
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        get_next = iterator.get_next()
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(get_next)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        op_properties = grappler_item.GetOpProperties()
        self.assertEqual(test_case['shape'],
                         op_properties['IteratorGetNext'][0].shape)

  def testConcatenate(self):

    def fn(dataset, tensor, shape):
      del shape
      return dataset.concatenate(dataset_ops.Dataset.from_tensors(tensor))

    self._testTransformation(fn)

  def testPrefetch(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.prefetch(42)

    self._testTransformation(fn)

  def testRepeat(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.repeat(42)

    self._testTransformation(fn)

  def testShuffle(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.shuffle(42)

    self._testTransformation(fn)

  def testCache(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.cache()

    self._testTransformation(fn)

  def testTake(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.take(42)

    self._testTransformation(fn)

  def testSkip(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.skip(42)

    self._testTransformation(fn)

  def testShard(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.shard(42, 0)

    self._testTransformation(fn)

  def testFilter(self):

    def fn(dataset, tensor, shape):
      del tensor, shape
      return dataset.filter(lambda x: True)

    self._testTransformation(fn)

  def as_tensor_shape(self, proto_with_symbolic_values):
    for i in range(len(proto_with_symbolic_values.dim)):
      if proto_with_symbolic_values.dim[i].size < -1:
        proto_with_symbolic_values.dim[i].size = -1
    return tensor_shape.TensorShape(proto_with_symbolic_values)

  def testBatch(self):
    test_cases = [{
        'tensor': 0,
        'shape': tensor_shape.TensorShape([None])
    }, {
        'tensor': np.array([1, 2, 3]),
        'shape': tensor_shape.TensorShape([None, 3])
    }, {
        'tensor': np.array([[1, 2, 3]]),
        'shape': tensor_shape.TensorShape([None, 1, 3])
    }]
    for test_case in test_cases:
      with ops.Graph().as_default() as g:
        dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
        dataset = dataset.batch(42)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        get_next = iterator.get_next()
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() inferred_shape = self.as_tensor_shape( op_properties['IteratorGetNext'][0].shape) self.assertTrue(test_case['shape'].dims[0].is_compatible_with( inferred_shape[0])) self.assertEqual(test_case['shape'][1:], inferred_shape[1:]) def testPaddedBatch(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([None]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([None, 4]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([None, 2, 4]) }] for test_case in test_cases: with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.from_tensors(test_case['tensor']) dataset = dataset.padded_batch(42, padded_shapes=test_case['shape'][1:]) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() inferred_shape = self.as_tensor_shape( op_properties['IteratorGetNext'][0].shape) self.assertTrue(test_case['shape'].dims[0].is_compatible_with( inferred_shape[0])) self.assertEqual(test_case['shape'][1:], inferred_shape[1:]) def testFlatMap(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([1, 3]) }] for test_case in test_cases: with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.range(42) def make_dataset(tensor): def dataset_fn(n): return dataset_ops.Dataset.from_tensors(tensor).repeat(n) return dataset_fn dataset = dataset.flat_map(make_dataset(test_case['tensor'])) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() self.assertEqual(test_case['shape'], op_properties['IteratorGetNext'][0].shape) def testInterleave(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([1, 3]) }] for test_case in test_cases: with ops.Graph().as_default() as g: dataset = dataset_ops.Dataset.range(42) def make_dataset(tensor): def dataset_fn(n): return dataset_ops.Dataset.from_tensors(tensor).repeat(n) return dataset_fn dataset = dataset.interleave( make_dataset(test_case['tensor']), cycle_length=42) iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(get_next) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() self.assertEqual(test_case['shape'], op_properties['IteratorGetNext'][0].shape) def testMap(self): test_cases = [{ 'tensor': 0, 'shape': tensor_shape.TensorShape([]) }, { 'tensor': np.array([1, 2, 3]), 'shape': tensor_shape.TensorShape([3]) }, { 'tensor': np.array([[1, 2, 3]]), 'shape': tensor_shape.TensorShape([3, 1]) }, { 'tensor': np.array([[[1, 2, 3], [4, 5, 6]]]), 'shape': tensor_shape.TensorShape([3, 2, 1]) }] 
    for test_case in test_cases:
      with ops.Graph().as_default() as g:
        dataset = dataset_ops.Dataset.from_tensors(test_case['tensor'])
        dataset = dataset.map(array_ops.transpose)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        get_next = iterator.get_next()
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(get_next)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        op_properties = grappler_item.GetOpProperties()
        self.assertEqual(test_case['shape'],
                         op_properties['IteratorGetNext'][0].shape)

  def testFromStructure(self):
    test_cases = [{
        'shape': tensor_shape.TensorShape([])
    }, {
        'shape': tensor_shape.TensorShape([3])
    }, {
        'shape': tensor_shape.TensorShape([1, 2])
    }, {
        'shape': tensor_shape.TensorShape([1, 2, 3])
    }]
    for test_case in test_cases:
      with ops.Graph().as_default() as g:
        iterator = iterator_ops.Iterator.from_structure(
            dtypes.int64, output_shapes=test_case['shape'])
        get_next = iterator.get_next()
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(get_next)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        op_properties = grappler_item.GetOpProperties()
        self.assertEqual(test_case['shape'],
                         op_properties['IteratorGetNext'][0].shape)

  def testFromStringHandle(self):
    test_cases = [{
        'shape': tensor_shape.TensorShape([])
    }, {
        'shape': tensor_shape.TensorShape([3])
    }, {
        'shape': tensor_shape.TensorShape([1, 2])
    }, {
        'shape': tensor_shape.TensorShape([1, 2, 3])
    }]
    for test_case in test_cases:
      with ops.Graph().as_default() as g:
        iterator = iterator_ops.Iterator.from_structure(dtypes.int64)
        handle = iterator.string_handle()
        iterator = iterator_ops.Iterator.from_string_handle(
            handle, dtypes.int64, output_shapes=test_case['shape'])
        get_next = iterator.get_next()
        train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)
        train_op.append(get_next)
        mg = meta_graph.create_meta_graph_def(graph=g)
        grappler_item = item.Item(mg)
        op_properties = grappler_item.GetOpProperties()
        self.assertEqual(test_case['shape'],
                         op_properties['IteratorGetNext'][0].shape)


if __name__ == '__main__':
  test.main()
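# --- Editor's sketch (not part of the original test file) -------------------
# A minimal distillation of the pattern every test above repeats: register
# the iterator's get_next tensor as a fetch via the TRAIN_OP collection,
# wrap the graph in a Grappler item, and read the shape Grappler infers for
# the IteratorGetNext node. The helper name `infer_get_next_shape` is
# illustrative only and assumes the same module-level imports the tests use
# (ops, dataset_ops, meta_graph, item).
def infer_get_next_shape(make_dataset_fn):
  """Returns the shape Grappler infers for a pipeline's IteratorGetNext."""
  with ops.Graph().as_default() as g:
    dataset = make_dataset_fn()  # Build the pipeline inside this graph.
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    get_next = iterator.get_next()
    ops.get_collection_ref(ops.GraphKeys.TRAIN_OP).append(get_next)
    mg = meta_graph.create_meta_graph_def(graph=g)
    op_properties = item.Item(mg).GetOpProperties()
    return op_properties['IteratorGetNext'][0].shape

# Example: infer_get_next_shape(lambda: dataset_ops.Dataset.range(42))
# should report a scalar shape, matching testRange above.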
tensorflow-master
tensorflow/python/grappler/datasets_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Grappler LayoutOptimizer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import device_properties_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.client import session from tensorflow.python.compat import compat from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.grappler import cluster as gcluster from tensorflow.python.grappler import tf_optimizer from tensorflow.python.layers import convolutional as conv_layers from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import gen_nn_ops from tensorflow.python.ops import map_fn from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent from tensorflow.python.training import saver as saver_lib def _weight(shape): """Generates a weight of a given shape.""" return random_ops.truncated_normal(shape, seed=0, stddev=0.1) def _bias(shape): """Generates a bias of a given shape.""" return constant_op.constant(0.1, shape=shape) def _conv2d(x, w): """Returns a 2d convolution layer with full stride.""" return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') def _max_pool_2x2(x): """Downsamples a feature map by 2X.""" return nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py def _two_layer_model(x): x_image = array_ops.reshape(x, [-1, 28, 28, 1]) w_conv1 = _weight([5, 5, 1, 32]) b_conv1 = _bias([32]) h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1) h_pool1 = _max_pool_2x2(h_conv1) w_conv2 = _weight([5, 5, 32, 64]) b_conv2 = _bias([64]) h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = _max_pool_2x2(h_conv2) return h_pool2 def _model_with_second_port(): random_seed.set_random_seed(0) x = random_ops.truncated_normal([2, 5, 5, 4], seed=0) scale = constant_op.constant(0.1, shape=[4]) offset = constant_op.constant(0.3, shape=[4]) y, mean, _ = nn.fused_batch_norm(x, scale, offset) mul = math_ops.add(y, mean) output = array_ops.identity(mul) return output def _model_with_branch(x): x_image = array_ops.reshape(x, [-1, 28, 28, 1]) w_conv1 = _weight([5, 5, 1, 
32]) w_conv2 = _weight([5, 5, 1, 32]) c_conv1 = _conv2d(x_image, w_conv1) c_conv2 = _conv2d(x_image, w_conv2) add = math_ops.add(c_conv1, c_conv2) return add def _model_with_vec_and_4d(x): x_image = array_ops.reshape(x, [-1, 28, 28, 1]) w_conv1 = _weight([5, 5, 1, 32]) c_conv1 = _conv2d(x_image, w_conv1) vector = constant_op.constant(6.4, shape=[32]) add = math_ops.add(c_conv1, vector) return add def _loop(): random_seed.set_random_seed(0) x1 = random_ops.truncated_normal([1, 784], seed=0) x2 = random_ops.truncated_normal([1, 784], seed=0) x3 = random_ops.truncated_normal([1, 784], seed=0) x4 = random_ops.truncated_normal([1, 784], seed=0) elems = (x1, x2, x3, x4) outputs = map_fn.map_fn(_two_layer_model, elems, dtype=dtypes.float32) return outputs def _loop_with_branch(): random_seed.set_random_seed(0) x1 = random_ops.truncated_normal([1, 784], seed=0) x2 = random_ops.truncated_normal([1, 784], seed=0) x3 = random_ops.truncated_normal([1, 784], seed=0) x4 = random_ops.truncated_normal([1, 784], seed=0) elems = (x1, x2, x3, x4) outputs = map_fn.map_fn(_model_with_branch, elems, dtype=dtypes.float32) return outputs def _loop_with_vec_and_4d(): random_seed.set_random_seed(0) x1 = random_ops.truncated_normal([1, 784], seed=0) x2 = random_ops.truncated_normal([1, 784], seed=0) x3 = random_ops.truncated_normal([1, 784], seed=0) x4 = random_ops.truncated_normal([1, 784], seed=0) elems = (x1, x2, x3, x4) outputs = map_fn.map_fn(_model_with_vec_and_4d, elems, dtype=dtypes.float32) return outputs def _get_config(layout_optimizer=True): if layout_optimizer: rewrite_options = rewriter_config_pb2.RewriterConfig( layout_optimizer=rewriter_config_pb2.RewriterConfig.ON, # do not remove duplicated nodes arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF) else: rewrite_options = rewriter_config_pb2.RewriterConfig( layout_optimizer=rewriter_config_pb2.RewriterConfig.OFF, # do not remove duplicated nodes arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF) rewrite_options.min_graph_nodes = -1 graph_options = config_pb2.GraphOptions( rewrite_options=rewrite_options, build_cost_model=1) config = config_pb2.ConfigProto(graph_options=graph_options) config.graph_options.optimizer_options.opt_level = -1 return config def _simple_metagraph(depthwise=False): random_seed.set_random_seed(0) x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0)) conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d y = conv(x, 32, [3, 3]) z = conv(y, 32, [3, 3]) optimizer = gradient_descent.GradientDescentOptimizer(1e-4) loss = math_ops.reduce_mean(z) train_op = optimizer.minimize(loss) graph = ops.get_default_graph() graph.add_to_collection('train_op', train_op) meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def()) return meta_graph def _get_cluster(): named_device = device_properties_pb2.NamedDevice() named_device.name = '/GPU:0' named_device.properties.type = 'GPU' named_device.properties.num_cores = 24 named_device.properties.frequency = 1000 named_device.properties.environment['architecture'] = '4' cluster = gcluster.Cluster(devices=[named_device]) return cluster def _is_transpose(node): return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith( 'TransposeNCHWToNHWC-LayoutOptimizer') def _is_permute(node): return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith( 'VecPermuteNCHWToNHWC-LayoutOptimizer') @test_util.for_all_test_methods(test_util.no_xla_auto_jit, 'Test does not apply in XLA setting') class 
LayoutOptimizerTest(test.TestCase): """Tests the Grappler layout optimizer.""" def _assert_trans_nchw_to_nhwc(self, name, nodes): self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes) def _assert_trans_nhwc_to_nchw(self, name, nodes): self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes) def _assert_map_nhwc_to_nchw(self, name, nodes): self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes) def _assert_vec_nchw_to_nhwc(self, name, nodes): self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes) def _assert_vec_nhwc_to_nchw(self, name, nodes): self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes) def _train(self, checkpoint_path, layout_optimizer=False, restore=False): ops.reset_default_graph() graph = ops.get_default_graph() with session.Session( config=_get_config(layout_optimizer), graph=graph) as sess: batch = 2 height = 6 width = 7 input_channels = 3 shape = [batch, height, width, input_channels] image = array_ops.placeholder(dtype='float32', shape=shape) conv1 = conv_layers.conv2d(image, 32, [3, 3]) conv2 = conv_layers.conv2d(conv1, 32, [3, 3]) optimizer = gradient_descent.GradientDescentOptimizer(0.01) loss = math_ops.reduce_mean(conv2) train_op = optimizer.minimize(loss) saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2) if restore: saver.restore(sess, checkpoint_path) else: self.evaluate(variables.global_variables_initializer()) np.random.seed(0) for _ in range(2): image_val = np.random.rand(*shape).astype(np.float32) sess.run([loss, train_op], feed_dict={image: image_val}) if restore: all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) all_vars_values = [var.eval(session=sess) for var in all_vars] return all_vars_values else: saver.save(sess, checkpoint_path) @test_util.deprecated_graph_mode_only def testTwoConvLayers(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) output = _two_layer_model(x) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
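      # The surviving pair wraps the convolution region: one transpose puts
      # the Conv2D input into NCHW, the other converts the final activation
      # back to NHWC, which is what the two assertions below check.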
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSplitWithNonConstAxis(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) dim = array_ops.placeholder(dtype='int32') split = array_ops.split(conv, 2, axis=dim) scale = constant_op.constant(0.1, shape=[32]) offset = constant_op.constant(0.3, shape=[32]) bn0 = nn.fused_batch_norm(split[0], scale, offset) bn1 = nn.fused_batch_norm(split[1], scale, offset) add = bn0[0] + bn1[0] output = array_ops.identity(add) with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={dim: 3}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3}) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes) self._assert_map_nhwc_to_nchw('split-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSplitVWithNonConstAxis(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) dim = array_ops.placeholder(dtype='int32') sizes = constant_op.constant([50, 10, 4], shape=[3]) split = gen_array_ops.split_v( value=conv, size_splits=sizes, axis=dim, num_split=3) output = math_ops.reduce_sum(split[0]) with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={dim: 3}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3}) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes) self._assert_map_nhwc_to_nchw('SplitV-2', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testPadWithConstPaddings(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]] paddings = constant_op.constant( paddings_val, dtype='int32', name='PaddingsConst') pad = array_ops.pad(conv, paddings) output = array_ops.identity(pad) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes) self.assertIn('Pad-1-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSum(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testCast(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) cast = math_ops.cast(conv, dtype='bool') output = array_ops.identity(cast) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSqueeze(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2]) squeeze = array_ops.squeeze(reduce_sum) output = array_ops.identity(squeeze) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSqueezeAlongHW(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True) squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2]) output = array_ops.identity(squeeze) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSqueezeAlongNHW(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True) squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2]) output = array_ops.identity(squeeze) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongHWC(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3]) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongNHW(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2]) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongC(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[3]) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Three transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongCKeepDims(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongHKeepDims(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReduceSumAlongWCKeepDims(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True) output = array_ops.identity(reduce_sum) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testConcatWithControlDependency(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) axis = constant_op.constant(3) var = variables.Variable(3) assign = state_ops.assign(var, 6) with ops.control_dependencies([assign]): concat = array_ops.concat([conv, conv], axis) output = array_ops.identity(concat) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('concat-0-0', nodes) self.assertIn('concat-2-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testFill(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = array_ops.placeholder(dtype='float32') conv = _two_layer_model(x) shape = array_ops.shape(conv) scalar = array_ops.constant(5.7) fill = array_ops.fill(shape, scalar) output = array_ops.identity(fill) x_val = [3.4] * 784 with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={x: x_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ x: x_val }) nodes = [] num_transposes = 0 num_vec_permute = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 if _is_permute(node.name): num_vec_permute += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) # Two vector permute nodes were initially added in the Expand phase of # LayoutOptimizer; they cancelled out each other in the Collapse phase. 
expected_vec_permute = 0 self.assertEqual(expected_vec_permute, num_vec_permute) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testTile(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) multiple = array_ops.placeholder(dtype='int32') tile = array_ops.tile(conv, multiple) output = array_ops.identity(tile) multiple_val = [2, 3, 4, 1] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={multiple: multiple_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ multiple: multiple_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes) self._assert_vec_nhwc_to_nchw('Tile-1', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReverseWithConstDims(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) dims = constant_op.constant([3, 1], name='DimsConst') reverse = array_ops.reverse(conv, dims) output = array_ops.identity(reverse) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes) self.assertIn('ReverseV2-1-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testReverseWithNonConstDims(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) dims = array_ops.placeholder(dtype='int32') reverse = array_ops.reverse(conv, dims) output = array_ops.identity(reverse) dims_val = [2, 3] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={dims: dims_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ dims: dims_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes) self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSelectOp(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) add = math_ops.add(conv, conv) mean = math_ops.reduce_mean(conv) condition = math_ops.less(conv, mean) select = gen_math_ops.select(condition, conv, add) output = array_ops.identity(select) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Select-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSelectOpConditionUnknownShape(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) add = math_ops.add(conv, conv) condition = array_ops.placeholder(dtype='bool') select = gen_math_ops.select(condition, conv, add) output = array_ops.identity(select) condition_val = np.zeros((1, 7, 7, 64)) with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={condition: condition_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={condition: condition_val}) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 3 self.assertEqual(expected_num_transposes, num_transposes) 
self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSelectOpScalarCondition(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) add = math_ops.add(conv, conv) condition = constant_op.constant(True) select = gen_math_ops.select(condition, conv, add) output = array_ops.identity(select) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Select-0-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testPadWithNonConstPaddings(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) paddings = array_ops.placeholder(dtype='int32') pad = array_ops.pad(conv, paddings) output = array_ops.identity(pad) paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={paddings: paddings_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ paddings: paddings_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes) self._assert_vec_nhwc_to_nchw('Pad-1', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testMaxPoolV2(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) ksize = constant_op.constant([1, 2, 3, 1], shape=[4]) strides = array_ops.placeholder(dtype='int32', shape=[4]) max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID') output = array_ops.identity(max_pool) strides_val = [1, 3, 2, 1] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={strides: strides_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ strides: strides_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes) self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes) self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testMaxPoolGradV2(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) ksize = constant_op.constant([1, 2, 3, 1], shape=[4]) strides = array_ops.placeholder(dtype='int32', shape=[4]) max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize, strides, 'VALID') output = array_ops.identity(max_pool_grad) strides_val = [1, 3, 2, 1] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={strides: strides_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ strides: strides_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes) self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes) self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testSliceWithNonConstAxis(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) size = array_ops.placeholder(dtype='int32') s = array_ops.slice(conv, [0, 0, 0, 0], size) output = array_ops.identity(s) size_val = [1, 2, 3, 4] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={size: size_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ size: size_val }) nodes = [] num_transposes = 0 for node in 
metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes) self._assert_vec_nhwc_to_nchw('Slice-2', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testStridedSliceWithNonConstAxis(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) end = array_ops.placeholder(dtype='int32') s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1]) output = array_ops.identity(s) end_val = [1, 2, 3, 4] with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={end: end_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ end: end_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes) self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes) self.assertIn('StridedSlice-1-LayoutOptimizer', nodes) self.assertIn('StridedSlice-3-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testStridedSliceWithMask1011(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) # This will generate a StridedSlice op with begin mask and # end mask 11(1011). s = conv[:, :, 1:-1, :] output = array_ops.identity(s) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes) self.assertIn('strided_slice-1-LayoutOptimizer', nodes) self.assertIn('strided_slice-2-LayoutOptimizer', nodes) self.assertIn('strided_slice-3-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testStridedSliceWithMask0111(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) # This will generate a StridedSlice op with begin mask and # end mask 7(0111). 
s = conv[:, :, :, 1:-1] output = array_ops.identity(s) with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes) self.assertIn('strided_slice-1-LayoutOptimizer', nodes) self.assertIn('strided_slice-2-LayoutOptimizer', nodes) self.assertIn('strided_slice-3-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testStridedSliceGradWithNonConstAxis(self): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = random_ops.truncated_normal([1, 784], seed=0) conv = _two_layer_model(x) end = array_ops.placeholder(dtype='int32') shape = array_ops.shape(conv) end_val = [1, 2, 3, 4] s = array_ops.strided_slice( conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1]) s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end, [1, 2, 3, 1], s) output = array_ops.identity(s_grad) with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={end: end_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ end: end_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes) self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes) self.assertIn('StridedSlice-1-LayoutOptimizer', nodes) self.assertIn('StridedSlice-2-LayoutOptimizer', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testShapeN(self): if test.is_gpu_available(cuda_only=True): x = array_ops.placeholder(dtype='float32') conv = _two_layer_model(x) shapen = array_ops.shape_n([conv, conv]) output = math_ops.add(shapen[0], shapen[1]) x_val = [1.7] * 784 with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={x: x_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={ x: x_val }) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 1 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes) self.assertAllEqual(output_val_ref, output_val) @test_util.deprecated_graph_mode_only def testShapeNFollowedByNotConvertibleNodeReshape(self): if test.is_gpu_available(cuda_only=True): x = array_ops.placeholder(dtype='float32') conv = _two_layer_model(x) conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1]) shapen = array_ops.shape_n([conv, conv_reshape]) shape = array_ops.identity(shapen[1]) ones = array_ops.ones(shape) output = math_ops.add_n([conv_reshape, ones]) x_val = [1.7] * 784 with session.Session(config=_get_config(False)) as sess: output_val_ref = sess.run(output, feed_dict={x: x_val}) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run( output, run_metadata=metadata, feed_dict={x: x_val}) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) expected_num_transposes = 2 self.assertEqual(expected_num_transposes, num_transposes) self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes) self.assertAllClose(output_val_ref, output_val, atol=1e-3) @test_util.deprecated_graph_mode_only def testLoop(self): if test.is_gpu_available(cuda_only=True): output = _loop() with session.Session(config=_get_config(False)) as sess: output_val_ref = self.evaluate(output) with session.Session(config=_get_config()) as sess: metadata = config_pb2.RunMetadata() output_val = sess.run(output, run_metadata=metadata) nodes = [] num_transposes = 0 for node in metadata.cost_graph.node: if _is_transpose(node.name): num_transposes += 1 nodes.append(node.name) # Four transposes were initially added in the Expand phase of # LayoutOptimizer; two of them are cancelled out in the Collapse phase. 
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)

  @test_util.deprecated_graph_mode_only
  def testLoopWithBranch(self):
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_branch()

      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = self.evaluate(output)

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)

  @test_util.deprecated_graph_mode_only
  def testLoopWithVecAnd4D(self):
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_vec_and_4d()

      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = self.evaluate(output)

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)

  @test_util.deprecated_graph_mode_only
  def testBinaryOpSecondPort(self):
    with compat.forward_compatibility_horizon(2019, 6, 7):
      if test.is_gpu_available(cuda_only=True):
        output = _model_with_second_port()

        with session.Session(config=_get_config(False)) as sess:
          output_val_ref = self.evaluate(output)

        with session.Session(config=_get_config()) as sess:
          metadata = config_pb2.RunMetadata()
          output_val = sess.run(output, run_metadata=metadata)

        nodes = []
        num_transposes = 0
        for node in metadata.cost_graph.node:
          if _is_transpose(node.name):
            num_transposes += 1
          nodes.append(node.name)

        expected_num_transposes = 2
        self.assertEqual(expected_num_transposes, num_transposes)
        self._assert_trans_nhwc_to_nchw('FusedBatchNormV3-0', nodes)
        self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
        self.assertAllClose(output_val_ref, output_val, atol=1e-3)

  @test_util.deprecated_graph_mode_only
  def testGradient(self):
    meta_graph = _simple_metagraph()
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
            min_graph_nodes=-1))
    optimized_graph = tf_optimizer.OptimizeGraph(
        config, meta_graph, cluster=_get_cluster())

    found = 0
    for node in optimized_graph.node:
      if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
        found += 1
        self.assertEqual(node.attr['data_format'].s, b'NCHW')
    self.assertEqual(found, 5)

  @test_util.deprecated_graph_mode_only
  def testDepthwise(self):
    meta_graph = _simple_metagraph(depthwise=True)
    config = config_pb2.ConfigProto()
    config.graph_options.rewrite_options.CopyFrom(
        rewriter_config_pb2.RewriterConfig(
            layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
            min_graph_nodes=-1))
    optimized_graph = tf_optimizer.OptimizeGraph(
        config, meta_graph, cluster=_get_cluster())

    found = 0
    for node in optimized_graph.node:
      if node.op in [
          'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
          'DepthwiseConv2dNativeBackpropInput'
      ]:
        found += 1
        self.assertEqual(node.attr['data_format'].s, b'NCHW')
    self.assertEqual(found, 6)

  def testCheckpointCompatibility(self):
    if not test.is_gpu_available(cuda_only=True):
      self.skipTest('GPU required')

    checkpoint_path = self.get_temp_dir()
    self._train(checkpoint_path)
    vars_expected = self._train(checkpoint_path, restore=True)
    vars_layout_optimized = self._train(
        checkpoint_path, restore=True, layout_optimizer=True)

    for var_expected, var_layout_optimized in zip(vars_expected,
                                                  vars_layout_optimized):
      self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)


if __name__ == '__main__':
  test.main()
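# --- Editor's sketch (not part of the original test file) -------------------
# testGradient and testDepthwise above apply the layout optimizer offline
# rather than through a Session: build a metagraph, enable only the layout
# rewriter, and let Grappler rewrite the graph against the synthetic GPU
# cluster from _get_cluster(). The helper name `_optimize_layout` is
# illustrative only.
def _optimize_layout(meta_graph_def):
  """Runs only the layout rewriter over a MetaGraphDef."""
  config = config_pb2.ConfigProto()
  config.graph_options.rewrite_options.CopyFrom(
      rewriter_config_pb2.RewriterConfig(
          layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
          min_graph_nodes=-1))
  return tf_optimizer.OptimizeGraph(
      config, meta_graph_def, cluster=_get_cluster())

# Usage: convolutions in the returned GraphDef should then carry the NCHW
# data format, as the two tests assert:
#   optimized = _optimize_layout(_simple_metagraph())
#   formats = [n.attr['data_format'].s
#              for n in optimized.node if n.op == 'Conv2D']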
tensorflow-master
tensorflow/python/grappler/layout_optimizer_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the swig wrapper of items.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.grappler import item from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class ItemTest(test.TestCase): def testInvalidItem(self): with ops.Graph().as_default() as g: a = constant_op.constant(10) b = constant_op.constant(20) c = a + b # pylint: disable=unused-variable mg = meta_graph.create_meta_graph_def(graph=g) # The train op isn't specified: this should raise an InvalidArgumentError # exception. with self.assertRaises(errors_impl.InvalidArgumentError): item.Item(mg) def testImportantOps(self): with ops.Graph().as_default() as g: a = constant_op.constant(10) b = constant_op.constant(20) c = a + b train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(c) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_list = grappler_item.IdentifyImportantOps() self.assertItemsEqual(['Const', 'Const_1', 'add'], op_list) def testOpProperties(self): with ops.Graph().as_default() as g: a = constant_op.constant(10) b = constant_op.constant(20) c = a + b z = control_flow_ops.no_op() train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(c) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) op_properties = grappler_item.GetOpProperties() # All the nodes in this model have one scalar output for node in grappler_item.metagraph.graph_def.node: node_prop = op_properties[node.name] if node.name == z.name: self.assertEqual(0, len(node_prop)) else: self.assertEqual(1, len(node_prop)) self.assertEqual(dtypes.int32, node_prop[0].dtype) self.assertEqual(tensor_shape.scalar(), node_prop[0].shape) def testUpdates(self): with ops.Graph().as_default() as g: a = constant_op.constant(10) b = constant_op.constant(20) c = a + b train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(c) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) initial_tf_item = grappler_item.tf_item no_change_tf_item = grappler_item.tf_item self.assertEqual(initial_tf_item, no_change_tf_item) # Modify the placement. 
for node in grappler_item.metagraph.graph_def.node: node.device = '/cpu:0' new_tf_item = grappler_item.tf_item self.assertNotEqual(initial_tf_item, new_tf_item) # Assign the same placement. for node in grappler_item.metagraph.graph_def.node: node.device = '/cpu:0' newest_tf_item = grappler_item.tf_item self.assertEqual(new_tf_item, newest_tf_item) @test_util.run_v1_only('b/120545219') def testColocationConstraints(self): with ops.Graph().as_default() as g: c = constant_op.constant([10]) v = variables.VariableV1([3], dtype=dtypes.int32) i = gen_array_ops.ref_identity(v) a = state_ops.assign(i, c) train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(a) mg = meta_graph.create_meta_graph_def(graph=g) grappler_item = item.Item(mg) groups = grappler_item.GetColocationGroups() self.assertEqual(len(groups), 1) self.assertItemsEqual( groups[0], ['Assign', 'RefIdentity', 'Variable', 'Variable/Assign']) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/grappler/item_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Grappler AutoMixedPrecision.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.compat import compat from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_impl from tensorflow.python.ops import random_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent def _input(shape): """Generates an input of a given shape.""" return variables.Variable(random_ops.truncated_normal(shape, seed=0)) def _weight(shape): """Generates a weight of a given shape.""" # Note that the lambda is needed to allow construction inside loops. 
return variables.Variable( lambda: init_ops.glorot_uniform_initializer(seed=0)(shape)) def _bias(shape): """Generates a bias of a given shape.""" return constant_op.constant(0.1, shape=shape) def _conv2d(x, w): """Returns a 2d convolution layer with full stride.""" return nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME') def _max_pool_2x2(x): """Downsamples a feature map by 2X.""" return nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def _fused_batchnorm(x, scale, offset): """Batchnorm.""" return nn_impl.fused_batch_norm( x, scale=scale, offset=offset, is_training=True) def _conv_bn(x): """Conv followed by batchnorm.""" i = array_ops.reshape(x, [-1, 8, 8, 1]) f = _weight([3, 3, 1, 6]) x = _conv2d(i, f) s = _weight([6]) o = _weight([6]) y, _, _ = _fused_batchnorm(x, s, o) y = array_ops.identity(y) return y def _matmul_act(x): """Matmul followed by activation.""" i = array_ops.reshape(x, [8, 8]) f = _weight([8, 8]) x = math_ops.matmul(i, f) y = nn.relu(x) return y def _conv_pool(x): """(Conv -> bias -> relu -> max_pool) x2.""" x_image = array_ops.reshape(x, [-1, 8, 8, 1]) w_conv1 = _weight([3, 3, 1, 6]) b_conv1 = _bias([6]) h_conv1 = nn.relu(nn.bias_add(_conv2d(x_image, w_conv1), b_conv1)) h_pool1 = _max_pool_2x2(h_conv1) w_conv2 = _weight([3, 3, 6, 4]) b_conv2 = _bias([4]) h_conv2 = nn.relu(nn.bias_add(_conv2d(h_pool1, w_conv2), b_conv2)) h_pool2 = _max_pool_2x2(h_conv2) return h_pool2 def _simple_loop(x, functor): """Simple loop whose body is provided by the functor.""" init = (constant_op.constant(0), x) c = lambda i, j: i < 4 b = lambda i, j: (i + 1, functor(j)) ij = control_flow_ops.while_loop(c, b, init) return ij def _loop_vars_intertwined(x0, y0, functor_x, functor_y): """Loop whose loop variables are intertwined.""" c = lambda i, j, x, y: j < 4 b = lambda i, j, x, y: (j + 1, i + 1, functor_y(y), functor_x(x)) init = (constant_op.constant(0), constant_op.constant(0), x0, y0) ijzw = control_flow_ops.while_loop(c, b, init) return ijzw def _lstm_cell(prev_c, prev_h, x): """Create an LSTM cell.""" # i: input gate # f: forget gate # o: output gate # c: cell state # x: input # h: embedding bias = _bias([4]) w = _weight([8, 16]) ifoc = math_ops.matmul(array_ops.concat([x, prev_h], axis=1), w) i, f, o, c = array_ops.split(ifoc, 4, axis=1) i = math_ops.sigmoid(nn.bias_add(i, bias)) f = math_ops.sigmoid(nn.bias_add(f, bias)) o = math_ops.sigmoid(nn.bias_add(o, bias)) c = math_ops.tanh(nn.bias_add(c, bias)) next_c = f * prev_c + i * c next_h = o * math_ops.tanh(next_c) return next_c, next_h def _recurrent_lstm(c, h): """Dynamic single-layer LSTM with TensorArray.""" def cond(i, c, h, ta_x): del c, h, ta_x return i < 4 def body(i, c, h, ta_x): x = ta_x.read(i) next_c, next_h = _lstm_cell(c, h, x) return (i + 1, next_c, next_h, ta_x) ta_x = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=4) for i in range(0, 4): ta_x = ta_x.write( i, constant_op.constant(0.1, shape=[8, 4], dtype=dtypes.float32)) init = (constant_op.constant(0), c, h, ta_x) r = control_flow_ops.while_loop(cond, body, init) return r def _make_node_with_color(color, input_tensor, name=None): """Returns a node representative of the specified list type.""" color = color.lower() if color == 'w': # White node weights = _weight(input_tensor.get_shape().as_list()) return math_ops.matmul(input_tensor, weights, name=name) if color == 'g': # Gray node return math_ops.add(input_tensor, 0.1, name=name) if color == 'c': # Clear node return nn.relu(input_tensor, name=name) if color == 'b': # Black node 
return math_ops.sqrt(math_ops.pow(input_tensor, 2.), name=name) raise ValueError('Invalid node color: ' + str(color)) def _build_simple_loop_graph(inp_colors, body_colors, out_colors): """Builds a test graph with a simple loop.""" a = _input([8, 8]) for i, color in enumerate(inp_colors): a = _make_node_with_color(color, a, 'input_%i' % i) def body(x): for i, color in enumerate(body_colors): x = _make_node_with_color(color, x, 'body_%i' % i) return x _, a = _simple_loop(a, body) for i, color in enumerate(out_colors): a = _make_node_with_color(color, a, 'output_%i' % i) a = array_ops.identity(a) return a def _get_config(auto_mixed_precision=True): """Returns a ConfigProto with auto mixed precision enabled if appropriate.""" if auto_mixed_precision: rewrite_config = rewriter_config_pb2.RewriterConfig( auto_mixed_precision=rewriter_config_pb2.RewriterConfig.ON, # do not remove duplicated nodes arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF) else: rewrite_config = rewriter_config_pb2.RewriterConfig( auto_mixed_precision=rewriter_config_pb2.RewriterConfig.OFF, # do not remove duplicated nodes arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF) rewrite_config.min_graph_nodes = -1 graph_options = config_pb2.GraphOptions( rewrite_options=rewrite_config, build_cost_model=1) config = config_pb2.ConfigProto(graph_options=graph_options) config.graph_options.optimizer_options.opt_level = -1 return config def _is_cast_to_fp16(node_name): return node_name.endswith('-CastToFp16-AutoMixedPrecision') def _is_cast_to_fp32(node_name): return node_name.endswith('-CastToFp32-AutoMixedPrecision') def _count_casts(nodes): num_to_fp16 = 0 num_to_fp32 = 0 for node in nodes: if _is_cast_to_fp16(node.name): num_to_fp16 += 1 elif _is_cast_to_fp32(node.name): num_to_fp32 += 1 return num_to_fp16, num_to_fp32 def _build_node_map(nodes): node_map = {} for node in nodes: node_map[node.name] = node return node_map class AutoMixedPrecisionTest(test.TestCase): """Tests the Grappler auto mixed precision optimizer.""" IGNORE_PERF_VAR = 'TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE' # TODO(benbarsdell): Add tests for eager mode with a tf.function. def setUp(self): super(AutoMixedPrecisionTest, self).setUp() # Enable the tests to be run on pre-Volta GPUs by telling the grappler pass # to ignore performance and always transform the graph. self._original_ignore_perf_value = os.getenv(self.IGNORE_PERF_VAR) os.environ[self.IGNORE_PERF_VAR] = '1' def tearDown(self): if self._original_ignore_perf_value is not None: os.environ[self.IGNORE_PERF_VAR] = self._original_ignore_perf_value else: del os.environ[self.IGNORE_PERF_VAR] super(AutoMixedPrecisionTest, self).tearDown() def _assert_output_fp16(self, node_map, node_name, output_port=0): self.assertEqual(node_map[node_name].output_info[output_port].dtype, types_pb2.DT_HALF) def _run(self, fetches): """Runs the graph and returns the evaluation of the fetches.""" with session.Session(config=_get_config(False)) as sess: sess.run(variables.global_variables_initializer()) output_val_ref = self.evaluate(fetches) with session.Session(config=_get_config()) as sess: sess.run(variables.global_variables_initializer()) metadata = config_pb2.RunMetadata() output_val = sess.run(fetches, run_metadata=metadata) return output_val_ref, output_val, metadata.cost_graph def _run_simple_loop_test(self, inp, body, out): """Runs a test of a simple loop. The loop has different node colors in different sections of the graph. 
The arguments must be strings where each character represents the color of a node in that section of the graph: w = white, g = gray, c = clear, b = black. CAPITALIZED characters indicate that the node is expected to be changed to DT_HALF during graph optimization. inp -> loop [ body ] -> out. Args: inp: A string of letters indicating the colors and expected dtypes of the input nodes. body: A string of letters indicating the colors and expected dtypes of the body nodes. out: A string of letters indicating the colors and expected dtypes of the output nodes. """ if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) expected_types = [] for section in [inp, body, out]: section_expected_types = [] for color in section: if color.isupper(): expected_type = types_pb2.DT_HALF else: expected_type = types_pb2.DT_FLOAT section_expected_types.append(expected_type) expected_types.append(section_expected_types) a = _build_simple_loop_graph(inp, body, out) output_val_ref, output_val, cost_graph = self._run(a) node_map = _build_node_map(cost_graph.node) section_names = ['input', 'while/body', 'output'] all_types_correct = True for section_name, section_expected_types in zip(section_names, expected_types): for i, expected_type in enumerate(section_expected_types): node_name = section_name + '_%i' % i output_port = 0 optimized_type = node_map[node_name].output_info[output_port].dtype if optimized_type != expected_type: print('Expected node %s to have type %s but got type %s' % (node_name, expected_type, optimized_type)) all_types_correct = False self.assertTrue(all_types_correct) self.assertAllClose(output_val_ref, output_val, atol=2e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_conv_bn(self): """Test graph with convolution followed by batch norm.""" with compat.forward_compatibility_horizon(2019, 6, 7): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([2, 8, 8, 1]) x = _conv_bn(x) output = _conv_bn(x) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) num_to_fp16, num_to_fp32 = _count_casts(cost_graph.node) self._assert_output_fp16(node_map, 'Conv2D') self._assert_output_fp16(node_map, 'FusedBatchNormV3') self._assert_output_fp16(node_map, 'Conv2D_1') self.assertEqual(num_to_fp16, 3) # Before Conv2D:0, Conv2D:1, Conv2D_1:1 self.assertEqual(num_to_fp32, 1) # After FusedBatchNormV3:0 self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_conv_bn_dropout(self): """Test dropout precision of convolution batch norm graph.""" with compat.forward_compatibility_horizon(2019, 6, 7): if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([2, 8, 8, 1]) y = _conv_bn(x) y = nn.dropout(y, rate=0.5) y = _conv_bn(y) y = array_ops.identity(y) optimizer = gradient_descent.GradientDescentOptimizer( learning_rate=0.01) g = optimizer.compute_gradients(y, [x]) output = (y, g) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'Conv2D') self._assert_output_fp16(node_map, 'FusedBatchNormV3') self._assert_output_fp16(node_map, 'dropout/mul') self._assert_output_fp16(node_map, 'Conv2D_1') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_conv_pool(self): """Test graph with convolution followed by pooling.""" if test.is_gpu_available(cuda_only=True):
random_seed.set_random_seed(0) x = _input([2, 8, 8, 1]) output = _conv_pool(x) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) num_to_fp16, num_to_fp32 = _count_casts(cost_graph.node) self._assert_output_fp16(node_map, 'Conv2D') self._assert_output_fp16(node_map, 'Relu') self._assert_output_fp16(node_map, 'MaxPool') self._assert_output_fp16(node_map, 'Conv2D_1') self.assertEqual(num_to_fp16, 4) self.assertEqual(num_to_fp32, 1) self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_simple_loop(self): """Test graph with while loop.""" if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([8, 8]) y = _simple_loop(x, _matmul_act)[1] optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01) g = optimizer.compute_gradients(y, [x]) output = (y, g) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'while/MatMul') self._assert_output_fp16(node_map, 'while/Relu') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_loop_with_vars_intertwined(self): """Test graph with intertwined while loops.""" if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([8, 8]) _, _, k, l = _loop_vars_intertwined( array_ops.ones(array_ops.shape(x)), x, _matmul_act, _matmul_act) optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01) g = optimizer.compute_gradients(k, [x]) output = (k, l, g) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'while/MatMul') self._assert_output_fp16(node_map, 'while/Relu') self._assert_output_fp16(node_map, 'while/MatMul_1') self._assert_output_fp16(node_map, 'while/Relu_1') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_multi_paths(self): """Test graph with multiple paths.""" if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([2, 8, 8, 3]) x1, x2, x3 = array_ops.split(x, num_or_size_splits=3, axis=3) y1 = _conv_pool(x1) y2 = _conv_pool(x2) y3 = _conv_pool(x3) y = array_ops.concat([y1, y2, y3], axis=3) y = array_ops.identity(y) optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01) g = optimizer.compute_gradients(y, [x]) output = (y, g) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'split') for suffix in [''] + ['_%i' % i for i in range(1, 6)]: self._assert_output_fp16(node_map, 'Conv2D' + suffix) self._assert_output_fp16(node_map, 'Relu' + suffix) self._assert_output_fp16(node_map, 'MaxPool' + suffix) self._assert_output_fp16(node_map, 'concat') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_multi_paths_2(self): """Test graph with multiple paths.""" if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) x = _input([8, 8]) y1 = _matmul_act(x) y2 = _matmul_act(x) y = y1 + y2 + x optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01) g = optimizer.compute_gradients(y, [x]) output = (g, y) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'MatMul') self._assert_output_fp16(node_map, 
'Relu') self._assert_output_fp16(node_map, 'MatMul_1') self._assert_output_fp16(node_map, 'Relu_1') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_recurrent_lstm(self): """Test graph with recurrent lstm.""" if test.is_gpu_available(cuda_only=True): random_seed.set_random_seed(0) init_c = _input([8, 4]) init_h = _input([8, 4]) _, _, h, _ = _recurrent_lstm(init_c, init_h) optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.01) g = optimizer.compute_gradients(h, [init_c, init_h]) output = (h, g) output_val_ref, output_val, cost_graph = self._run(output) node_map = _build_node_map(cost_graph.node) self._assert_output_fp16(node_map, 'while/concat') self._assert_output_fp16(node_map, 'while/MatMul') self._assert_output_fp16(node_map, 'while/split') self._assert_output_fp16(node_map, 'while/Sigmoid') self._assert_output_fp16(node_map, 'while/Sigmoid_1') self._assert_output_fp16(node_map, 'while/Sigmoid_2') self._assert_output_fp16(node_map, 'while/Tanh') self._assert_output_fp16(node_map, 'while/Tanh_1') self.assertAllClose(output_val_ref, output_val, atol=1e-3, rtol=1e-3) @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_1(self): self._run_simple_loop_test('W', 'C', 'C') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_2(self): self._run_simple_loop_test('C', 'C', 'W') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_3(self): self._run_simple_loop_test('W', 'G', 'W') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_4(self): self._run_simple_loop_test('W', 'gbg', 'W') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_5(self): self._run_simple_loop_test('b', 'gWC', 'c') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_6(self): self._run_simple_loop_test('b', 'CWCG', 'C') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_7(self): self._run_simple_loop_test('C', 'GWCG', 'C') @test_util.run_deprecated_v1 def test_propagation_through_simple_loop_8(self): self._run_simple_loop_test('C', 'CgbgWC', 'g') if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/grappler/auto_mixed_precision_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A python interface for Grappler clusters.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.core.framework import step_stats_pb2 from tensorflow.core.grappler.costs import op_performance_data_pb2 from tensorflow.core.protobuf import device_properties_pb2 from tensorflow.python import pywrap_tensorflow as tf_cluster class Cluster(object): """Grappler Clusters.""" def __init__(self, allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None): """Creates a Cluster. Args: allow_soft_placement: If True, TF will automatically fix illegal placements instead of erroring out if the placement isn't legal. disable_detailed_stats: If True, detailed statistics will not be available. disable_timeline: If True, the timeline information will not be reported. devices: A list of devices of type device_properties_pb2.NamedDevice. If None, a device list will be created based on the spec of the local machine. """ self._tf_cluster = None self._generate_timeline = not disable_timeline if devices is None: self._tf_cluster = tf_cluster.TF_NewCluster(allow_soft_placement, disable_detailed_stats) else: devices_serialized = [device.SerializeToString() for device in devices] self._tf_cluster = tf_cluster.TF_NewVirtualCluster(devices_serialized) def Shutdown(self): if self._tf_cluster is not None: tf_cluster.TF_ShutdownCluster(self._tf_cluster) self._tf_cluster = None def __del__(self): self.Shutdown() @property def tf_cluster(self): return self._tf_cluster def ListDevices(self): """Returns a list of available hardware devices.""" if self._tf_cluster is None: return [] return [device_properties_pb2.NamedDevice.FromString(device) for device in tf_cluster.TF_ListDevices(self._tf_cluster)] def ListAvailableOps(self): """Returns a list of all available operations (sorted alphabetically).""" return tf_cluster.TF_ListAvailableOps() def GetSupportedDevices(self, item): return tf_cluster.TF_GetSupportedDevices(self._tf_cluster, item.tf_item) def EstimatePerformance(self, device): return tf_cluster.TF_EstimatePerformance(device.SerializeToString()) def MeasureCosts(self, item): """Returns the cost of running the specified item. Args: item: The item for which to measure the costs. Returns: The triplet op_perfs, runtime, step_stats. """ ret_from_swig = tf_cluster.TF_MeasureCosts(item.tf_item, self._tf_cluster, self._generate_timeline) if ret_from_swig is None: return None op_perf_bytes_list, run_time, step_stats_bytes = ret_from_swig op_perfs = [op_performance_data_pb2.OpPerformance.FromString(op_perf_bytes) for op_perf_bytes in op_perf_bytes_list] return (op_perfs, run_time, step_stats_pb2.StepStats.FromString(step_stats_bytes)) def DeterminePeakMemoryUsage(self, item): """Returns a snapshot of the peak memory usage. 
Args: item: The item for which to measure the costs. Returns: A hashtable indexed by device name. """ return tf_cluster.TF_DeterminePeakMemoryUsage(item.tf_item, self._tf_cluster) @contextlib.contextmanager def Provision(allow_soft_placement=True, disable_detailed_stats=True, disable_timeline=True, devices=None): cluster = Cluster(allow_soft_placement, disable_detailed_stats, disable_timeline, devices) yield cluster cluster.Shutdown()
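# Example usage of the Cluster API above (an illustrative sketch only, not
# part of the original module; `my_item` stands for a hypothetical item.Item
# built from a valid MetaGraphDef):
#
#   from tensorflow.python.grappler import cluster as gcluster
#
#   with gcluster.Provision(disable_detailed_stats=False) as c:
#     for device in c.ListDevices():
#       print(device.name)  # NamedDevice protos describing local hardware.
#     op_perfs, run_time, step_stats = c.MeasureCosts(my_item)
#     print("Measured runtime: " + str(run_time))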
tensorflow-master
tensorflow/python/grappler/cluster.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """A tool for cost analysis.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys from google.protobuf import message from google.protobuf import text_format from tensorflow.contrib.fused_conv.ops import gen_fused_conv2d_bias_activation_op # pylint: disable=unused-import from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.grappler import cost_analyzer from tensorflow.python.grappler import tf_optimizer from tensorflow.python.platform import app from tensorflow.python.platform import gfile from tensorflow.python.training import saver def get_metagraph(): """Constructs and returns a MetaGraphDef from the input file.""" with gfile.GFile(FLAGS.input) as input_file: input_data = input_file.read() try: saved_model = saved_model_pb2.SavedModel() text_format.Merge(input_data, saved_model) meta_graph = saved_model.meta_graphs[0] except text_format.ParseError: try: saved_model.ParseFromString(input_data) meta_graph = saved_model.meta_graphs[0] except message.DecodeError: try: meta_graph = meta_graph_pb2.MetaGraphDef() text_format.Merge(input_data, meta_graph) except text_format.ParseError: try: meta_graph.ParseFromString(input_data) except message.DecodeError: try: graph_def = graph_pb2.GraphDef() text_format.Merge(input_data, graph_def) except text_format.ParseError: try: graph_def.ParseFromString(input_data) except message.DecodeError: raise ValueError("Invalid input file.") importer.import_graph_def(graph_def, name="") graph = ops.get_default_graph() meta_graph = saver.export_meta_graph( graph_def=graph.as_graph_def(), graph=graph) if FLAGS.fetch is not None: fetch_collection = meta_graph_pb2.CollectionDef() for fetch in FLAGS.fetch.split(","): fetch_collection.node_list.value.append(fetch) meta_graph.collection_def["train_op"].CopyFrom(fetch_collection) return meta_graph def main(_): metagraph = get_metagraph() config = config_pb2.ConfigProto() if FLAGS.rewriter_config is not None: text_format.Merge(FLAGS.rewriter_config, config.graph_options.rewrite_options) optimized_graph = tf_optimizer.OptimizeGraph(config, metagraph) metagraph.graph_def.CopyFrom(optimized_graph) report = cost_analyzer.GenerateCostReport(metagraph, FLAGS.per_node_report, FLAGS.verbose) print(report) if FLAGS.memory_report: report = cost_analyzer.GenerateMemoryReport(metagraph) print(report) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--input", type=str, default=None, help="Input file path. 
Accepts SavedModel, MetaGraphDef, and GraphDef in " "either binary or text format.") parser.add_argument( "--fetch", type=str, default=None, help="The names of the fetch nodes, delimited by commas.") parser.add_argument( "--rewriter_config", type=str, default=None, help="Configuration for the grappler optimizers, described as a " "RewriterConfig protocol buffer. Usage example 1: " "--rewriter_config='optimize_tensor_layout: true " "disable_model_pruning: true'. Usage example 2: " "--rewriter_config='optimizers: \"constfold\" optimizers: \"layout\"'") parser.add_argument( "--per_node_report", action="store_true", help="Generate per-node report. By default the report contains stats " "aggregated on a per op type basis, per_node_report adds results " "for each individual node to the report.") parser.add_argument( "--memory_report", action="store_true", help="Generate memory usage report.") parser.add_argument( "--verbose", action="store_true", help="Generate verbose reports. By default, succinct reports are used.") FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
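# Example invocation (illustrative only; the input path and fetch name are
# hypothetical):
#
#   python cost_analyzer_tool.py --input=/tmp/model.meta \
#       --fetch=softmax --per_node_report --memory_report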
tensorflow-master
tensorflow/python/grappler/cost_analyzer_tool.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Grappler Constant Folding.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ConstantFoldingTest(test.TestCase): # See b/76008022. def testScanInsideWhile(self): def loop_cond(idx_step, *unused_args): return idx_step < 1 def loop_body(idx_step, y): x = array_ops.zeros([10, 20, 30], dtype=dtypes.float32) x = functional_ops.scan( math_ops.add, x, initializer=array_ops.zeros([20, 30], dtype=dtypes.float32), back_prop=False, parallel_iterations=1) with ops.device('/cpu:0'): y = array_ops.identity(x) return idx_step + 1, y if test.is_gpu_available(cuda_only=True): init_y = array_ops.zeros([10, 20, 30], dtype=dtypes.float32) _, y = control_flow_ops.while_loop( loop_cond, loop_body, loop_vars=[0, init_y], back_prop=False, parallel_iterations=1) y_v = self.evaluate(y) self.assertAllEqual(np.zeros([10, 20, 30]), y_v) if __name__ == '__main__': test.main()
tensorflow-master
tensorflow/python/grappler/constant_folding_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Graph Placer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.framework import errors from tensorflow.python.framework import ops as tf_ops from tensorflow.python.grappler import cluster as gcluster from tensorflow.python.grappler import hierarchical_controller from tensorflow.python.grappler import item as gitem from tensorflow.python.grappler import tf_optimizer from tensorflow.python.training import training def PlaceGraph(metagraph, cluster=None, allotted_time=3600, hparams=None, verbose=False): """Place the provided metagraph. Args: metagraph: the metagraph to place. cluster: an optional set of hardware resources to optimize the placement for. If none is specified, we'll optimize the placement for the hardware available on the local machine. allotted_time: the maximum amount of time in seconds to spend optimizing the placement. hparams: hyperparameters used to fine tune the placer. verbose: prints debug information if True. Returns: The placed metagraph. """ if cluster is None: cluster = gcluster.Cluster() # Default the hparams up front so that hparams.failing_signal is available # below if the original placement can't be measured. if hparams is None: hparams = hierarchical_controller.hierarchical_controller_hparams() # We run with a single child hparams.num_children = 1 # Optimize the metagraph to speed up the placement config = config_pb2.ConfigProto() optimized_graph = tf_optimizer.OptimizeGraph( config, metagraph, verbose=verbose, cluster=cluster) optimized_metagraph = meta_graph_pb2.MetaGraphDef() optimized_metagraph.CopyFrom(metagraph) optimized_metagraph.graph_def.CopyFrom(optimized_graph) item = gitem.Item(optimized_metagraph) # Measure the runtime achievable with the original placement. try: _, original_run_time, _ = cluster.MeasureCosts(item) if verbose: print("Runtime for original placement: " + str(original_run_time)) except errors.OpError as e: if verbose: print("Original placement isn't feasible: " + str(e)) original_run_time = hparams.failing_signal with tf_ops.Graph().as_default(): # Place all the nodes of the controller on the CPU. We don't want them to # fight for accelerator memory with the model to optimize.
with tf_ops.device("/device:CPU:0"): model = hierarchical_controller.HierarchicalController( hparams, item, cluster) ops = model.build_controller() session_creator = training.ChiefSessionCreator() with training.MonitoredSession(session_creator=session_creator) as sess: start_time = time.time() current_time = start_time while current_time - start_time < allotted_time: grouping_actions = model.generate_grouping(sess) input_to_seq2seq = model.create_group_embeddings( grouping_actions, verbose=verbose) model.generate_placement(input_to_seq2seq, sess) try: run_time = model.eval_placement( sess, verbose=verbose) except errors.OpError as e: if verbose: print("Failed to run graph: " + str(e)) run_time = hparams.failing_signal updated = model.update_reward(sess, run_time, verbose=verbose) if updated and run_time < original_run_time: if verbose: print("Found better placement, with runtime " + str(run_time)) model.export_placement(metagraph) model.process_reward(sess) current_time = time.time() return metagraph
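# Example usage (a minimal sketch; `mg` stands for a hypothetical MetaGraphDef
# whose train_op collection names the nodes to optimize for):
#
#   from tensorflow.python.grappler import graph_placer
#
#   placed_mg = graph_placer.PlaceGraph(mg, allotted_time=600, verbose=True)
#   # PlaceGraph mutates and returns `metagraph`: the device fields of its
#   # nodes now carry the best placement found within the allotted time.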
tensorflow-master
tensorflow/python/grappler/graph_placer.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the model analyzer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.grappler import model_analyzer from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class PyWrapOptimizeGraphTest(test.TestCase): @test_util.run_deprecated_v1 def testBasic(self): """Make sure arguments can be passed correctly.""" a = constant_op.constant([10, 11], name="a") b = constant_op.constant([10], name="b") c = math_ops.add(a, b, name="c") d = math_ops.add_n([a, c], name="d") train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(d) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = model_analyzer.GenerateModelReport(mg) # Check the report headers self.assertTrue(b"a [Const]" in report) self.assertTrue(b"b [Const]" in report) self.assertTrue(b"c [Add]" in report) self.assertTrue(b"d [AddN]" in report) # Also print the report to make it easier to debug print("{}".format(report)) @test_util.run_deprecated_v1 def testDebugMode(self): """Make sure arguments can be passed correctly.""" a = constant_op.constant([10, 11], name="a") b = constant_op.constant([10], name="b") c = math_ops.add(a, b, name="c") train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) train_op.append(c) mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph()) report = model_analyzer.GenerateModelReport(mg, debug=True) # Check the report headers self.assertTrue(b"input 0 (int32) has known value" in report) self.assertTrue(b"input 1 (int32) has known value" in report) # Also print the report to make it easier to debug print("{}".format(report)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/grappler/model_analyzer_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """A tool that finds all subgraphs of a given size in a TF graph. The subgraph patterns are sorted by occurrence, and only the transitive fanin part of the graph with regard to the fetch nodes is considered. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys from tensorflow.python import pywrap_tensorflow as tf_wrap from tensorflow.python.platform import app def main(_): tf_wrap.GraphAnalyzer(FLAGS.input, FLAGS.n) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--input", type=str, default=None, help="Input file path for a TensorFlow MetaGraphDef.") parser.add_argument( "--n", type=int, default=None, help="The size of the subgraphs.") FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
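# Example invocation (illustrative only; the input path is hypothetical):
#
#   python graph_analyzer.py --input=/tmp/model.meta --n=3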
tensorflow-master
tensorflow/python/grappler/graph_analyzer.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Provides a proper python API for the symbols exported through swig.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow as tf_wrap from tensorflow.python.grappler import cluster as gcluster from tensorflow.python.grappler import item as gitem def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None): """Analyze the cost of each TensorFlow op and node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. per_node_report: by default the report contains stats aggregated on a per op type basis, setting per_node_report to True adds results for each individual node to the report. verbose: Prints out the entire operation proto instead of a summary table. cluster: Analyze the costs using the specified cluster, or the local machine if no cluster was specified. Returns: A string containing the cost report. """ if cluster is None: cluster = gcluster.Cluster(disable_detailed_stats=False) ret_from_swig = tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster) return ret_from_swig def GenerateMemoryReport(metagraph, detailed_report=True, cluster=None): """Analyze the peak memory usage for the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. detailed_report: print the live tensors in addition to the peak memory usage. cluster: Analyze the memory using the specified cluster, or the local machine if no cluster was specified. Returns: A string with the formatted memory usage. """ if cluster is None: cluster = gcluster.Cluster( disable_detailed_stats=True, disable_timeline=True) item = gitem.Item(metagraph) peak_usage = cluster.DeterminePeakMemoryUsage(item) report = "" for device, snapshot in peak_usage.items(): peak_mem = snapshot[0] report += "Peak usage for device " + device + ": " + str( peak_mem) + " bytes\n" if detailed_report: live_tensors = snapshot[1] for tensor in live_tensors: op_name = tensor[0] output_id = tensor[1] mem_used = tensor[2] report += " " + str(op_name) + ":" + str(output_id) + " uses " + str( mem_used) + " bytes\n" return report
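# Example usage (an illustrative sketch; `mg` is a hypothetical MetaGraphDef
# with a populated train_op collection):
#
#   from tensorflow.python.grappler import cost_analyzer
#
#   print(cost_analyzer.GenerateCostReport(mg, per_node_report=True))
#   print(cost_analyzer.GenerateMemoryReport(mg, detailed_report=False))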
tensorflow-master
tensorflow/python/grappler/cost_analyzer.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A python interface for Grappler items.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.grappler.costs import op_performance_data_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python import pywrap_tensorflow as tf_item class Item(object): """GrapplerItem.""" def __init__(self, metagraph, ignore_colocation=True, ignore_user_placement=False): """Creates an Item. Args: metagraph: a TensorFlow metagraph. ignore_colocation: if set, the tool will ignore all the colocation constraints generated by TensorFlow. ignore_user_placement: if set, all the placement annotations in the metagraph will be ignored. Raises: ValueError: the metagraph is incomplete or invalid. """ self._metagraph = metagraph self._item_graph = meta_graph_pb2.MetaGraphDef() self._item_graph.CopyFrom(metagraph) self._ignore_colocation = ignore_colocation self._ignore_user_placement = ignore_user_placement self._tf_item = None self._BuildTFItem() def IdentifyImportantOps(self, sort_topologically=False): return tf_item.TF_IdentifyImportantOps(self.tf_item, sort_topologically) def GetOpProperties(self): ret_from_swig = tf_item.TF_GetOpProperties(self.tf_item) properties = {} for key, values in ret_from_swig.items(): prop = [] for value in values: prop.append( op_performance_data_pb2.OpInfo.TensorProperties.FromString(value)) properties[key] = prop return properties def GetColocationGroups(self): """Return a list of hard colocation constraints. All the nodes in a colocation tuple must be placed on the same device for the model to work. Returns: A list of colocation tuples. """ return tf_item.TF_GetColocationGroups(self.tf_item) @property def metagraph(self): return self._metagraph @property def tf_item(self): if self._item_graph != self._metagraph: self._BuildTFItem() self._item_graph.CopyFrom(self._metagraph) return self._tf_item def _BuildTFItem(self): self._tf_item = tf_item.TF_NewItem(self._metagraph.SerializeToString(), self._ignore_colocation, self._ignore_user_placement)
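# Example usage (a sketch; assumes `g` is a tf.Graph whose TRAIN_OP collection
# has been populated, as in the item_test.py cases earlier in this dump):
#
#   from tensorflow.python.framework import meta_graph
#   from tensorflow.python.grappler import item
#
#   mg = meta_graph.create_meta_graph_def(graph=g)
#   grappler_item = item.Item(mg)
#   print(grappler_item.IdentifyImportantOps())  # Transitive fanin op names.
#   print(grappler_item.GetColocationGroups())   # Hard colocation tuples.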
tensorflow-master
tensorflow/python/grappler/item.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Provides a proper python API for the symbols exported through swig.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow as tf_wrap def GenerateModelReport(metagraph, assume_valid_feeds=True, debug=False): """Report what's known statically about each node in the provided metagraph. Args: metagraph: A TensorFlow MetaGraphDef. assume_valid_feeds: If True, assume that the shape of the fed nodes is valid. debug: Add some information useful for debugging. Returns: A string containing the report. """ ret_from_swig = tf_wrap.GenerateModelReport(metagraph.SerializeToString(), assume_valid_feeds, debug) return ret_from_swig
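# Example usage (an illustrative sketch; `mg` is a hypothetical MetaGraphDef):
#
#   from tensorflow.python.grappler import model_analyzer
#
#   report = model_analyzer.GenerateModelReport(mg, debug=True)
#   print(report)  # One entry per node with statically inferred properties.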
tensorflow-master
tensorflow/python/grappler/model_analyzer.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Facilities for creating multiple test combinations. Here is a simple example for testing various optimizers in Eager and Graph: class AdditionExample(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine(mode=["graph", "eager"], optimizer=[AdamOptimizer(), GradientDescentOptimizer()])) def testOptimizer(self, optimizer): ... f(optimizer)... This will run `testOptimizer` 4 times with the specified optimizers: 2 in Eager and 2 in Graph mode. The test is going to accept the same parameters as the ones used in `combine()`. The parameters need to match by name between the `combine()` call and the test signature. It is necessary to accept all parameters. See `OptionalParameter` for a way to implement optional parameters. The `combine()` function is available for creating a cross product of various options. The `times()` function exists for creating a product of N `combine()`-ed results. The execution of generated tests can be customized in a number of ways: - The test can be skipped if it is not running in the correct environment. - The arguments that are passed to the test can be additionally transformed. - The test can be run with specific Python context managers. These behaviors can be customized by providing instances of `TestCombination` to `generate()`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import OrderedDict import contextlib import re import types import unittest from absl.testing import parameterized import six from tensorflow.python.util import tf_inspect class TestCombination(object): """Customize the behavior of `generate()` and the tests that it executes. Here is the sequence of steps for executing a test combination: 1. The test combination is evaluated for whether it should be executed in the given environment by calling `should_execute_combination`. 2. If the test combination is going to be executed, then the arguments for all combined parameters are validated. Some arguments can be handled in a special way. This is achieved by implementing that logic in `ParameterModifier` instances that are returned from `parameter_modifiers`. 3. Before executing the test, `context_managers` are installed around it. """ def should_execute_combination(self, kwargs): """Indicates whether the combination of test arguments should be executed. If the environment doesn't satisfy the dependencies of the test combination, then it can be skipped. Arguments: kwargs: Arguments that are passed to the test combination. Returns: A tuple of a boolean and an optional string. The boolean False indicates that the test should be skipped. The string gives a textual description of the reason. If the test is going to be executed, then this method returns `None` instead of the string.
""" del kwargs return (True, None) def parameter_modifiers(self): """Returns `ParameterModifier` instances that customize the arguments.""" return [] def context_managers(self, kwargs): """Return context managers for running the test combination. The test combination will run under all context managers that all `TestCombination` instances return. Arguments: kwargs: Arguments and their values that are passed to the test combination. Returns: A list of instantiated context managers. """ del kwargs return [] class ParameterModifier(object): """Customizes the behavior of a particular parameter.""" DO_NOT_PASS_TO_THE_TEST = object() def __init__(self, parameter_name=None): """Construct a parameter modifier that may be specific to a parameter. Arguments: parameter_name: A `ParameterModifier` instance may operate on a class of parameters or on a parameter with a particular name. Only `ParameterModifier` instances that are of a unique type or were initialized with a unique `parameter_name` will be executed. See `__eq__` and `__hash__`. """ object.__init__(self) self._parameter_name = parameter_name def modified_arguments(self, kwargs, requested_parameters): """Replace user-provided arguments before they are passed to a test. This makes it possible to adjust user-provided arguments before passing them to the test method. Arguments: kwargs: The combined arguments for the test. requested_parameters: The set of parameters that are defined in the signature of the test method. Returns: A dictionary with updates to `kwargs`. Keys with values set to `ParameterModifier.DO_NOT_PASS_TO_THE_TEST` are going to be deleted and not passed to the test. """ del kwargs, requested_parameters return {} def __eq__(self, other): """Compare `ParameterModifier` by type and `parameter_name`.""" if self is other: return True elif type(self) is type(other): return self._parameter_name == other._parameter_name else: return False def __ne__(self, other): return not self.__eq__(other) def __hash__(self): """Compare `ParameterModifier` by type or `parameter_name`.""" if self._parameter_name: return hash(self._parameter_name) else: return id(self.__class__) class OptionalParameter(ParameterModifier): """A parameter that is optional in `combine()` and in the test signature.""" def modified_arguments(self, kwargs, requested_parameters): if self._parameter_name in requested_parameters: return {} else: return {self._parameter_name: ParameterModifier.DO_NOT_PASS_TO_THE_TEST} def generate(combinations, test_combinations=()): """A decorator for generating combinations of a test method or a test class. Parameters of the test method must match by name to get the corresponding value of the combination. Tests must accept all parameters that are passed other than the ones that are `OptionalParameter`. Args: combinations: a list of dictionaries created using combine() and times(). test_combinations: a tuple of `TestCombination` instances that customize the execution of generated tests. Returns: a decorator that will cause the test method or the test class to be run under the specified conditions. Raises: ValueError: if any parameters were not accepted by the test method """ def decorator(test_method_or_class): """The decorator to be returned.""" # Generate good test names that can be used with --test_filter. named_combinations = [] for combination in combinations: # We use OrderedDicts in `combine()` and `times()` to ensure stable # order of keys in each dictionary. 
assert isinstance(combination, OrderedDict) name = "".join([ "_{}_{}".format("".join(filter(str.isalnum, key)), "".join(filter(str.isalnum, _get_name(value, i)))) for i, (key, value) in enumerate(combination.items()) ]) named_combinations.append( OrderedDict( list(combination.items()) + [("testcase_name", "_test{}".format(name))])) if isinstance(test_method_or_class, type): class_object = test_method_or_class class_object._test_method_ids = test_method_ids = {} for name, test_method in six.iteritems(class_object.__dict__.copy()): if (name.startswith(unittest.TestLoader.testMethodPrefix) and isinstance(test_method, types.FunctionType)): delattr(class_object, name) methods = {} parameterized._update_class_dict_for_param_test_case( class_object.__name__, methods, test_method_ids, name, parameterized._ParameterizedTestIter( _augment_with_special_arguments( test_method, test_combinations=test_combinations), named_combinations, parameterized._NAMED, name)) for method_name, method in six.iteritems(methods): setattr(class_object, method_name, method) return class_object else: test_method = _augment_with_special_arguments( test_method_or_class, test_combinations=test_combinations) return parameterized.named_parameters(*named_combinations)(test_method) return decorator def _augment_with_special_arguments(test_method, test_combinations): def decorated(self, **kwargs): """A wrapped test method that can treat some arguments in a special way.""" original_kwargs = kwargs.copy() # Skip combinations that are going to be executed in a different testing # environment. reasons_to_skip = [] for combination in test_combinations: should_execute, reason = combination.should_execute_combination( original_kwargs.copy()) if not should_execute: reasons_to_skip.append(" - " + reason) if reasons_to_skip: self.skipTest("\n".join(reasons_to_skip)) customized_parameters = [] for combination in test_combinations: customized_parameters.extend(combination.parameter_modifiers()) customized_parameters = set(customized_parameters) # The function for running the test under the total set of # `context_managers`: def execute_test_method(): requested_parameters = tf_inspect.getfullargspec(test_method).args for customized_parameter in customized_parameters: for argument, value in customized_parameter.modified_arguments( original_kwargs.copy(), requested_parameters).items(): if value is ParameterModifier.DO_NOT_PASS_TO_THE_TEST: kwargs.pop(argument, None) else: kwargs[argument] = value omitted_arguments = set(requested_parameters).difference( set(list(kwargs.keys()) + ["self"])) if omitted_arguments: raise ValueError("The test requires parameters whose arguments " "were not passed: {} .".format(omitted_arguments)) missing_arguments = set(list(kwargs.keys()) + ["self"]).difference( set(requested_parameters)) if missing_arguments: raise ValueError("The test does not take parameters that were passed " ": {} .".format(missing_arguments)) kwargs_to_pass = {} for parameter in requested_parameters: if parameter == "self": kwargs_to_pass[parameter] = self else: kwargs_to_pass[parameter] = kwargs[parameter] test_method(**kwargs_to_pass) # Install `context_managers` before running the test: context_managers = [] for combination in test_combinations: for manager in combination.context_managers( original_kwargs.copy()): context_managers.append(manager) if hasattr(contextlib, "nested"): # Python 2 # TODO(isaprykin): Switch to ExitStack when contextlib2 is available. 
with contextlib.nested(*context_managers): execute_test_method() else: # Python 3 with contextlib.ExitStack() as context_stack: for manager in context_managers: context_stack.enter_context(manager) execute_test_method() return decorated def combine(**kwargs): """Generate combinations based on its keyword arguments. Two sets of returned combinations can be concatenated using +. Their product can be computed using `times()`. Args: **kwargs: keyword arguments of form `option=[possibilities, ...]` or `option=the_only_possibility`. Returns: a list of dictionaries for each combination. Keys in the dictionaries are the keyword argument names. Each key has one value - one of the corresponding keyword argument values. """ if not kwargs: return [OrderedDict()] sort_by_key = lambda k: k[0] kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key)) first = list(kwargs.items())[0] rest = dict(list(kwargs.items())[1:]) rest_combined = combine(**rest) key = first[0] values = first[1] if not isinstance(values, list): values = [values] return [ OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key)) for v in values for combined in rest_combined ] def times(*combined): """Generate a product of N sets of combinations. times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4]) Args: *combined: N lists of dictionaries that specify combinations. Returns: a list of dictionaries for each combination. Raises: ValueError: if some of the inputs have overlapping keys. """ assert combined if len(combined) == 1: return combined[0] first = combined[0] rest_combined = times(*combined[1:]) combined_results = [] for a in first: for b in rest_combined: if set(a.keys()).intersection(set(b.keys())): raise ValueError("Keys need to not overlap: {} vs {}".format( a.keys(), b.keys())) combined_results.append(OrderedDict(list(a.items()) + list(b.items()))) return combined_results class NamedObject(object): """A class that translates an object into a good test name.""" def __init__(self, name, obj): object.__init__(self) self._name = name self._obj = obj def __getattr__(self, name): return getattr(self._obj, name) def __call__(self, *args, **kwargs): return self._obj(*args, **kwargs) def __repr__(self): return self._name def _get_name(value, index): return re.sub("0[xX][0-9a-fA-F]+", str(index), str(value))
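# Example usage (a sketch of the combination helpers defined above):
#
#   combine(mode=["graph", "eager"], num=[1, 2])
#   # -> four OrderedDicts, one per (mode, num) pair.
#
#   times(combine(mode=["graph", "eager"]), combine(num=[1, 2]))
#   # == combine(mode=["graph", "eager"], num=[1, 2])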
tensorflow-master
tensorflow/python/distribute/test_combinations.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The step function abstraction represents a single training step.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import backprop from tensorflow.python.training import optimizer as optimizer_lib class Step(object): """Interface for performing each step of a training algorithm.""" def __init__(self, distribution): self._distribution = distribution @property def distribution(self): return self._distribution def initialize(self): return [] def __call__(self): """Perform one step of this training algorithm.""" raise NotImplementedError("must be implemented in descendants") # TODO(priyag): Add an method to access initialization and finalize ops. class StandardInputStep(Step): """Step with a standard implementation of input handling. Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model. """ def __init__(self, dataset_fn, distribution): super(StandardInputStep, self).__init__(distribution) self._iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn()) def initialize(self): return self._iterator.initialize() class StandardSingleLossStep(StandardInputStep): """A step function that implements a training step for a feed forward network. An instance of this class is intended to be used as a callable: ```python ... step = step_fn.StandardSingleLossStep( dataset, loss_fn, optimizer, distribution) # Run a single training step on a given DistributionStrategy: step(distribution) ... ``` Args: dataset_fn: a function that returns a tf.data Dataset that produces the input for the model. loss_fn: a function that takes a context and inputs as arguments. It returns the loss for those inputs. `context` is an instance of `values.MultiStepContext` that will be passed when `loss_fn` is run. `context` can be used to specify the outputs to be returned from `loss_fn`, among other things. optimizer: an optimizer that implements an update rule. distribution: a `DistributionStrategy` object. """ def __init__(self, dataset_fn, loss_fn, optimizer, distribution, iterations_per_step=1): super(StandardSingleLossStep, self).__init__(dataset_fn, distribution) self._loss_fn = loss_fn self._optimizer = optimizer self._iterations_per_step = iterations_per_step def __call__(self): with self._distribution.scope(): def step_fn(ctx, inputs): """Function to run one iteration with one input.""" gradients_fn = backprop.implicit_grad(self._loss_fn) gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn) grads_and_vars = self.distribution.extended.call_for_each_replica( gradients_fn, args=(ctx, inputs)) # If threads use layers, then we need to run the first step # sequentially, so that layers.build() is not executed in parallel. # Otherwise, multiple sets of mirrored variables are going to be # created. 
        return self._optimizer._distributed_apply(  # pylint: disable=protected-access
            self.distribution, grads_and_vars)

      # TODO(priyag): Return the outputs, context, etc. as well.
      ctx = self.distribution.extended.experimental_run_steps_on_iterator(
          step_fn, self._iterator, self._iterations_per_step)
      return ctx.run_op
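

# Illustrative sketch of driving `StandardSingleLossStep`, mirroring the class
# docstring above. `make_dataset`, `loss_fn`, `optimizer` and `distribution`
# are hypothetical placeholders supplied by the caller, not names defined in
# this module:
#
#   step = StandardSingleLossStep(
#       dataset_fn=make_dataset,    # returns a tf.data Dataset
#       loss_fn=loss_fn,            # (context, inputs) -> loss tensor
#       optimizer=optimizer,        # an optimizer implementing an update rule
#       distribution=distribution,  # a DistributionStrategy
#       iterations_per_step=2)
#   step.initialize()               # initialize the input iterator
#   train_op = step()               # one call runs two iterations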
tensorflow-master
tensorflow/python/distribute/step_fn.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for some testing utils from strategy_test_lib.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import OrderedDict from absl.testing import parameterized from tensorflow.python.distribute import test_combinations as combinations from tensorflow.python.eager import test class TestingCombinationsTest(test.TestCase): def test_combine(self): self.assertEqual([{ "a": 1, "b": 2 }, { "a": 1, "b": 3 }, { "a": 2, "b": 2 }, { "a": 2, "b": 3 }], combinations.combine(a=[1, 2], b=[2, 3])) def test_arguments_sorted(self): self.assertEqual([ OrderedDict([("aa", 1), ("ab", 2)]), OrderedDict([("aa", 1), ("ab", 3)]), OrderedDict([("aa", 2), ("ab", 2)]), OrderedDict([("aa", 2), ("ab", 3)]) ], combinations.combine(ab=[2, 3], aa=[1, 2])) def test_combine_single_parameter(self): self.assertEqual([{ "a": 1, "b": 2 }, { "a": 2, "b": 2 }], combinations.combine(a=[1, 2], b=2)) def test_add(self): self.assertEqual( [{ "a": 1 }, { "a": 2 }, { "b": 2 }, { "b": 3 }], combinations.combine(a=[1, 2]) + combinations.combine(b=[2, 3])) def test_times(self): c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"]) c2 = combinations.combine(mode=["eager"], loss=["callable"]) c3 = combinations.combine(distribution=["d1", "d2"]) c4 = combinations.times(c3, c1 + c2) self.assertEqual([ OrderedDict([("distribution", "d1"), ("loss", "callable"), ("mode", "graph")]), OrderedDict([("distribution", "d1"), ("loss", "tensor"), ("mode", "graph")]), OrderedDict([("distribution", "d1"), ("loss", "callable"), ("mode", "eager")]), OrderedDict([("distribution", "d2"), ("loss", "callable"), ("mode", "graph")]), OrderedDict([("distribution", "d2"), ("loss", "tensor"), ("mode", "graph")]), OrderedDict([("distribution", "d2"), ("loss", "callable"), ("mode", "eager")]) ], c4) def test_times_variable_arguments(self): c1 = combinations.combine(mode=["graph", "eager"]) c2 = combinations.combine(optimizer=["adam", "gd"]) c3 = combinations.combine(distribution=["d1", "d2"]) c4 = combinations.times(c3, c1, c2) self.assertEqual([ OrderedDict([("distribution", "d1"), ("mode", "graph"), ("optimizer", "adam")]), OrderedDict([("distribution", "d1"), ("mode", "graph"), ("optimizer", "gd")]), OrderedDict([("distribution", "d1"), ("mode", "eager"), ("optimizer", "adam")]), OrderedDict([("distribution", "d1"), ("mode", "eager"), ("optimizer", "gd")]), OrderedDict([("distribution", "d2"), ("mode", "graph"), ("optimizer", "adam")]), OrderedDict([("distribution", "d2"), ("mode", "graph"), ("optimizer", "gd")]), OrderedDict([("distribution", "d2"), ("mode", "eager"), ("optimizer", "adam")]), OrderedDict([("distribution", "d2"), ("mode", "eager"), ("optimizer", "gd")]) ], c4) self.assertEqual( combinations.combine( mode=["graph", "eager"], optimizer=["adam", "gd"], distribution=["d1", "d2"]), 
        c4)

  def test_overlapping_keys(self):
    c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])
    c2 = combinations.combine(mode=["eager"], loss=["callable"])
    with self.assertRaisesRegexp(ValueError, ".*Keys.+overlap.+"):
      _ = combinations.times(c1, c2)


@combinations.generate(combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(parameterized.TestCase):

  def test_add_things(self, a, b, c):
    self.assertLessEqual(3, a + b + c)
    self.assertLessEqual(a + b + c, 5)

  def test_add_things_one_more(self, a, b, c):
    self.assertLessEqual(3, a + b + c)
    self.assertLessEqual(a + b + c, 5)

  def not_a_test(self, a=0, b=0, c=0):
    del a, b, c
    self.fail()

  def _test_but_private(self, a=0, b=0, c=0):
    del a, b, c
    self.fail()

  # Check that nothing funny happens to a non-callable that starts with "test".
  test_member = 0


if __name__ == "__main__":
  test.main()
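

# Illustrative sketch: `combinations.generate` can also decorate a single test
# method rather than a whole TestCase class (the decorator handles both). The
# class and method below are hypothetical, not part of this test file:
#
#   class SingleMethodExample(test.TestCase, parameterized.TestCase):
#
#     @combinations.generate(combinations.combine(a=[1, 2], b=[3]))
#     def test_sum(self, a, b):
#       self.assertGreaterEqual(a + b, 4)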
tensorflow-master
tensorflow/python/distribute/test_combinations_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Named model combinations for use with combinations.combine()."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute.model_collection import simple_models

simple_functional_model = combinations.NamedObject(
    "SimpleFunctionalModel", simple_models.SimpleFunctionalModel())

simple_sequential_model = combinations.NamedObject(
    "SimpleSequentialModel", simple_models.SimpleSequentialModel())

simple_subclass_model = combinations.NamedObject(
    "SimpleSubclassModel", simple_models.SimpleSubclassModel())
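

# Illustrative sketch of consuming these named models in a parameterized test.
# The test method below is hypothetical, not defined in this module; wrapping
# each model in `NamedObject` keeps the generated test case names readable,
# since its `__repr__` returns the given name instead of an object address:
#
#   @combinations.generate(
#       combinations.combine(
#           model=[simple_functional_model, simple_sequential_model,
#                  simple_subclass_model],
#           mode=["eager"]))
#   def test_model(self, model, mode):
#     ...  # attribute access and calls delegate to the wrapped model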
tensorflow-master
tensorflow/python/distribute/model_combinations.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for class Step.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute.single_loss_example import single_loss_example from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.ops import variables class SingleLossStepTest(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.times( strategy_combinations.distributions_and_v1_optimizers(), combinations.combine( mode=strategy_combinations.graph_and_eager_modes), combinations.combine(is_tpu=[False])) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1, mode=["graph"], is_tpu=[True])) def testTrainNetwork(self, distribution, optimizer_fn, is_tpu): with distribution.scope(): single_loss_step, layer = single_loss_example( optimizer_fn, distribution, use_bias=True, iterations_per_step=2) if context.executing_eagerly(): single_loss_step.initialize() run_step = single_loss_step else: with self.cached_session() as sess: sess.run(single_loss_step.initialize()) run_step = sess.make_callable(single_loss_step()) self.evaluate(variables.global_variables_initializer()) weights, biases = [], [] for _ in range(5): run_step() weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(is_not_increasing) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/step_fn_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test DistributionStrategy in the zero batch case.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.layers import normalization from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.ops.losses import losses from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent class NormalizationTest(test.TestCase, parameterized.TestCase): @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy, ], mode=["graph"], fused=[True, False])) def disabled_testBNWithZeroBatchInput(self, distribution, fused): with distribution.scope(), self.cached_session() as sess: bn_list = [] inputs = np.random.random((0, 4, 4, 3)) + 100 targets = np.random.random((0, 4, 4, 3)) inputs_placeholder = array_ops.placeholder( dtype=dtypes.float32, shape=[None, 4, 4, 3]) targets_placeholder = array_ops.placeholder( dtype=dtypes.float32, shape=[None, 4, 4, 3]) def step_fn(is_training, inputs, targets=None): bn = normalization.BatchNormalization( axis=3, epsilon=1e-3, momentum=0.9, fused=fused) bn_list.append(bn) outputs = bn.apply(inputs, training=is_training) if not is_training: return outputs loss = losses.mean_squared_error(targets, outputs) optimizer = gradient_descent.GradientDescentOptimizer(0.01) train_op = optimizer.minimize(loss) with ops.control_dependencies([train_op]): return array_ops.identity(loss) train_op = distribution.extended.call_for_each_replica( step_fn, args=(True, inputs_placeholder, targets_placeholder)) predict_op = distribution.extended.call_for_each_replica( step_fn, args=(False, inputs_placeholder)) bn = bn_list[0] self.evaluate(variables.global_variables_initializer()) # Check for initial statistics and weights. moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) self.assertAllEqual([0, 0, 0], moving_mean) self.assertAllEqual([1, 1, 1], moving_var) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) self.assertAllEqual([1, 1, 1], np_gamma) self.assertAllEqual([0, 0, 0], np_beta) for _ in range(100): np_output, _, _ = sess.run([train_op] + bn.updates, { inputs_placeholder: inputs, targets_placeholder: targets }) self.assertEqual(0.0, np_output) # Verify that the statistics and weights are not changed after training. 
moving_mean, moving_var = self.evaluate( [bn.moving_mean, bn.moving_variance]) self.assertAllEqual([0, 0, 0], moving_mean) self.assertAllEqual([1, 1, 1], moving_var) np_gamma, np_beta = self.evaluate([bn.gamma, bn.beta]) self.assertAllEqual([1, 1, 1], np_gamma) self.assertAllEqual([0, 0, 0], np_beta) # Test inference. np_output = sess.run(predict_op, {inputs_placeholder: inputs}) self.assertEqual([], np_output.tolist()) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy, ], mode=["eager"], fused=[True, False])) def testBNWithZeroBatchInput(self, distribution, fused): with distribution.scope(): inputs = np.random.random((0, 4, 4, 3)).astype(np.float32) + 100 targets = np.random.random((0, 4, 4, 3)).astype(np.float32) bn = normalization.BatchNormalization( axis=3, epsilon=1e-3, momentum=0.9, fused=fused) optimizer = gradient_descent.GradientDescentOptimizer(0.01) @def_function.function def train_step(): def step_fn(inputs, targets): with backprop.GradientTape() as tape: outputs = bn.apply(inputs, training=True) loss = losses.mean_squared_error(targets, outputs) grads = tape.gradient(loss, bn.variables) optimizer.apply_gradients(zip(grads, bn.variables)) return loss return distribution.experimental_run_v2( step_fn, args=(inputs, targets)) for _ in range(100): np_output = train_step().numpy() self.assertEqual(0.0, np_output) # Verify that the statistics and weights are not changed after training. self.assertAllEqual([0, 0, 0], bn.moving_mean.numpy()) self.assertAllEqual([1, 1, 1], bn.moving_variance.numpy()) self.assertAllEqual([1, 1, 1], bn.gamma.numpy()) self.assertAllEqual([0, 0, 0], bn.beta.numpy()) @def_function.function def test_step(): def step_fn(inputs): outputs = bn.apply(inputs, training=False) return outputs return distribution.experimental_run_v2( step_fn, args=(inputs,)) # Test inference. self.assertAllEqual(np.zeros(shape=(0, 4, 4, 3), dtype=np.float32), test_step().numpy()) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/zero_batch_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for class OneDeviceStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import strategy_test_lib from tensorflow.python.eager import context from tensorflow.python.eager import test @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy, strategy_combinations.one_device_strategy_gpu ], mode=["eager", "graph"])) class OneDeviceStrategyTest( strategy_test_lib.DistributionTestBase, strategy_test_lib.OneDeviceDistributionTestBase): def testMinimizeLoss(self, distribution): if context.executing_eagerly(): self._test_minimize_loss_eager(distribution) else: self._test_minimize_loss_graph(distribution) def testReplicaId(self, distribution): self._test_replica_id(distribution) def testCallAndMergeExceptions(self, distribution): self._test_call_and_merge_exceptions(distribution) def testReplicateDataset(self, distribution): dataset_fn = lambda: dataset_ops.Dataset.range(10) expected_values = [[i] for i in range(10)] input_fn = self._input_fn_to_test_input_context( dataset_fn, expected_num_replicas_in_sync=1, expected_num_input_pipelines=1, expected_input_pipeline_id=0) self._test_input_fn_iterable(distribution, input_fn, expected_values) def testMakeInputFnIteratorWithDataset(self, distribution): dataset_fn = lambda: dataset_ops.Dataset.range(10) expected_values = [[i] for i in range(10)] input_fn = self._input_fn_to_test_input_context( dataset_fn, expected_num_replicas_in_sync=1, expected_num_input_pipelines=1, expected_input_pipeline_id=0) iterator = distribution.make_input_fn_iterator(input_fn) self._test_input_fn_iterator( iterator, distribution.extended.worker_devices, expected_values) def testMakeInputFnIteratorWithCallable(self, distribution): def fn(): dataset = dataset_ops.Dataset.range(10) it = dataset.make_one_shot_iterator() return it.get_next expected_values = [[i] for i in range(10)] input_fn = self._input_fn_to_test_input_context( fn, expected_num_replicas_in_sync=1, expected_num_input_pipelines=1, expected_input_pipeline_id=0) iterator = distribution.make_input_fn_iterator(input_fn) self._test_input_fn_iterator( iterator, distribution.extended.worker_devices, expected_values, test_reinitialize=False, ignore_order=True) def testNumpyDataset(self, distribution): self._test_numpy_dataset(distribution) def testRun(self, distribution): self._test_run(distribution) def testAllReduceSum(self, distribution): self._test_all_reduce_sum(distribution) def testAllReduceSumGradients(self, distribution): self._test_all_reduce_sum_gradients(distribution) def testAllReduceSumGradientTape(self, 
distribution): self._test_all_reduce_sum_gradient_tape(distribution) def testAllReduceMean(self, distribution): self._test_all_reduce_mean(distribution) def testAllReduceMeanGradients(self, distribution): self._test_all_reduce_mean_gradients(distribution) def testAllReduceMeanGradientTape(self, distribution): self._test_all_reduce_mean_gradient_tape(distribution) def testTrainableVariables(self, distribution): self._test_trainable_variable(distribution) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.one_device_strategy_on_worker_1, strategy_combinations.one_device_strategy_gpu_on_worker_1 ], mode=["eager", "graph"])) class OneDeviceStrategyOnRemoteWorkerTest( strategy_test_lib.DistributionTestBase, strategy_test_lib.OneDeviceDistributionTestBase): def testDeviceAndInputDeviceAreColocated(self, distribution): self._test_device_and_input_device_are_colocated(distribution) def testDeviceAndInputDeviceAreColocatedWithFunction(self, distribution): self._test_device_and_input_device_are_colocated_with_function(distribution) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/one_device_strategy_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities to construct a TF subgraph implementing distributed All-Reduce.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math from tensorflow.python.framework import device as device_lib from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nccl_ops def _flatten_tensors(tensors): """Check tensors for isomorphism and flatten. Args: tensors: list of T `tf.Tensor` which must all have the same shape. Returns: tensors: a list of T `tf.Tensor` which are flattened (1D) views of tensors shape: the original shape of each element of input tensors Raises: ValueError: tensors are empty or non-isomorphic or have unknown shape. """ if not tensors: raise ValueError("tensors cannot be empty") shape = tensors[0].shape for tensor in tensors: shape = shape.merge_with(tensor.shape) if not shape.is_fully_defined(): raise ValueError("Tensors must have statically known shape.") if len(shape) != 1: reshaped = [] for t in tensors: with ops.colocate_with(t): reshaped.append(array_ops.reshape(t, [-1])) tensors = reshaped return tensors, shape def _reshape_tensors(tensors, shape): """Reshape tensors flattened by _flatten_tensors. Args: tensors: list of T `tf.Tensor` of identical length 1D tensors. shape: list of integers describing the desired shape. Product of the elements must equal the length of each tensor. Returns: list of T `tf.Tensor` which are the reshaped inputs. """ reshaped = [] for t in tensors: with ops.colocate_with(t): reshaped.append(array_ops.reshape(t, shape)) return reshaped def _padded_split(tensor, pieces): """Like split for 1D tensors but pads-out case where len % pieces != 0. Args: tensor: T `tf.Tensor` that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of T `tf.Tensor` of length pieces, which hold the values of thin input tensor, in order. The final tensor may be zero-padded on the end to make its size equal to those of all of the other tensors. Raises: ValueError: The input tensor is not 1D. """ shape = tensor.shape if 1 != len(shape): raise ValueError("input tensor must be 1D") tensor_len = shape.dims[0].value with ops.colocate_with(tensor): if tensor_len % pieces != 0: # pad to an even length chunk_size = 1 + tensor_len // pieces if pieces > tensor_len: # This is an edge case that should not come up in practice, # i.e. a different reduction algorithm would be better, # but we'll make it work just for completeness. 
pad_len = pieces - tensor_len extended_whole = array_ops.concat( [tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0) parts = array_ops.split(extended_whole, pieces) return parts, pad_len elif (pieces - 1) * chunk_size >= tensor_len: # Another edge case of limited real interest. pad_len = (pieces * chunk_size) % tensor_len extended_whole = array_ops.concat( [tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0) parts = array_ops.split(extended_whole, pieces) return parts, pad_len else: last_chunk_size = tensor_len - (pieces - 1) * chunk_size pad_len = chunk_size - last_chunk_size piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size] parts = array_ops.split(tensor, piece_lens) parts[-1] = array_ops.concat( [parts[-1], array_ops.zeros([pad_len], dtype=tensor.dtype)], 0) return parts, pad_len else: return array_ops.split(tensor, pieces), 0 def _strip_padding(tensors, pad_len): """Strip the suffix padding added by _padded_split. Args: tensors: list of T `tf.Tensor` of identical length 1D tensors. pad_len: number of elements to be stripped from the end of each tensor. Returns: list of T `tf.Tensor` which are the stripped inputs. Raises: ValueError: tensors must be a non-empty list of 1D tensors, and each must be longer than pad_len. """ if not tensors: raise ValueError("tensors cannot be empty") shape = tensors[0].shape if len(shape) > 1: raise ValueError("tensors must be 1D") prefix_len = int(shape[0] - pad_len) if prefix_len < 0: raise ValueError("pad_len longer than tensor") stripped = [] for t in tensors: with ops.colocate_with(t): stripped.append(array_ops.slice(t, [0], [prefix_len])) return stripped def _ragged_split(tensor, pieces): """Like split for 1D tensors but allows case where len % pieces != 0. Args: tensor: T `tf.Tensor` that must be 1D. pieces: a positive integer specifying the number of pieces into which tensor should be split. Returns: list of T `tf.Tensor` of length pieces, which hold the values of the input tensor, in order. The final tensor may be shorter than the others, which will all be of equal length. Raises: ValueError: input tensor must be 1D. """ shape = tensor.shape if 1 != len(shape): raise ValueError("input tensor must be 1D") tensor_len = shape.dims[0].value chunk_size = tensor_len // pieces with ops.colocate_with(tensor): if tensor_len != (pieces * chunk_size): # last piece will be short assert pieces > 1 last_chunk_size = tensor_len - ((pieces - 1) * chunk_size) assert last_chunk_size > 0 piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size] return array_ops.split(tensor, piece_lens) else: return array_ops.split(tensor, pieces) def _ring_permutations(num_workers, num_subchunks, gpu_perm): """"Generate an array of device index arrays, one for each subchunk. In the basic ring reduction algorithm there are size(T)/num_devices data chunks and each device process one chunk per tick, i.e. sending one chunk and receiving one chunk. The idea of subchunking is that each device processes num_subchunks smaller data regions per tick, and the ring rank permutation is different for each subchunk index so that a device is potentially sending to and receiving from num_subchunks different other devices at each tick. Where multiple independent data channels exist between devices, this strategy supplies a method of using them in parallel. Args: num_workers: number of worker tasks num_subchunks: number of subchunks into which to divide each per-GPU chunk. 
gpu_perm: an array of integers in [0, num_gpus-1] giving the default ring order of GPUs at each worker. Other permutations will be generated by rotating this array and splicing together per-worker instances. Raises: ValueError: the number of subchunks may not exceed the number of GPUs. Returns: pred_by_s_d: list of lists that maps (by index) from (subchunk, dev) to preceding device in the permutation for that subchunk. The device index of GPU i at worker j is i + (j * num_gpus). rank_by_s_d: list of lists that maps (by index) from (subchunk, dev) to local rank of device d in the permutation for that subchunk. """ num_gpus = len(gpu_perm) devices = num_workers * num_gpus if devices == 0: return [], [] if num_subchunks > num_gpus: raise ValueError( "num_subchunks %d must be <= num_gpus %d" % (num_subchunks, num_gpus)) rotation_interval = max(1, int(num_gpus / num_subchunks)) perms_by_s = [] for s in range(0, num_subchunks): full_order = [] offset = s * rotation_interval for w in range(0, num_workers): default_order = [(w * num_gpus) + i for i in gpu_perm] dev_order = default_order[offset:] + default_order[:offset] full_order += dev_order perms_by_s.append(full_order) pred_by_s_d = [[-1 for d in range(0, devices)] for s in range(0, num_subchunks)] rank_by_s_d = [[-1 for d in range(0, devices)] for s in range(0, num_subchunks)] for s in range(0, num_subchunks): for d in range(0, devices): for t in range(0, devices): if d == perms_by_s[s][t]: rank_by_s_d[s][d] = t pred_by_s_d[s][d] = perms_by_s[s][(t + devices - 1) % devices] break return (pred_by_s_d, rank_by_s_d) def build_ring_all_reduce(input_tensors, num_workers, num_subchunks, gpu_perm, red_op, un_op=None): """Construct a subgraph performing a ring-style all-reduce of input_tensors. Args: input_tensors: a list of T `tf.Tensor` objects, which must all have the same shape and type. num_workers: number of worker tasks spanned by input_tensors. num_subchunks: number of subchunks each device should process in one tick. gpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at each worker. All workers must have the same number of GPUs with the same rank ordering. If NVLINK is available, this should be a ring order supported by NVLINK edges. red_op: a binary operator for elementwise reduction. un_op: an optional unary operator to apply to fully reduced values. Raises: ValueError: empty input_tensors or they don't all have same size. Returns: a list of T `tf.Tensor` identical sum-reductions of input_tensors. """ if len(input_tensors) < 2: raise ValueError("input_tensors must be length 2 or longer") input_tensors, shape = _flatten_tensors(input_tensors) devices = [t.device for t in input_tensors] (pred_by_s_d, rank_by_s_d) = _ring_permutations( num_workers, num_subchunks, gpu_perm) chunks_by_dev, pad_len = _build_ring_gather( input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op) if un_op: chunks_by_dev = _apply_unary_to_chunks(un_op, chunks_by_dev) output_tensors = _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev) if pad_len > 0: output_tensors = _strip_padding(output_tensors, pad_len) if len(shape) != 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors def _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op): """Construct a subgraph for the first (reduction) pass of ring all-reduce. Args: input_tensors: a list of T `tf.Tensor` 1D input tensors of same shape and type. 
devices: array of device name strings num_subchunks: number of subchunks each device should process in one tick. pred_by_s_d: as produced by _ring_permutations rank_by_s_d: as produced by _ring_permutations red_op: a binary operator for elementwise reduction Raises: ValueError: tensors must all be one dimensional. Returns: list of list of T `tf.Tensor` of (partially) reduced values where exactly num_subchunks chunks at each device are fully reduced. """ num_devices = len(input_tensors) if num_devices == 0: return [] if num_devices == 1: return input_tensors shape = input_tensors[0].shape if 1 != len(shape): raise ValueError("input tensors must be 1D") num_chunks = num_devices * num_subchunks num_ticks = num_devices - 1 # Initialize chunks_by_dev with splits of the input tensors. chunks_by_dev = [] split_pad_len = 0 for d in range(0, num_devices): with ops.device(devices[d]): splits, split_pad_len = _padded_split(input_tensors[d], num_chunks) chunks_by_dev.append(splits) # Reduction phase for tick in range(0, num_ticks): # One new partial reduction for every chunk new_partial_reductions = [None for _ in range(0, num_chunks)] # Compute reductions with respect to last tick's values for d in range(0, num_devices): with ops.device(devices[d]): for s in range(0, num_subchunks): rank = rank_by_s_d[s][d] seg_index = (rank + num_devices - (2 + tick)) % num_devices pred_dev = pred_by_s_d[s][d] chunk_index = (seg_index * num_subchunks) + s new_partial_reductions[chunk_index] = red_op( chunks_by_dev[pred_dev][chunk_index], chunks_by_dev[d][chunk_index]) # Update chunks_by_dev with the new values at the end of the tick. for d in range(0, num_devices): for s in range(0, num_subchunks): rank = rank_by_s_d[s][d] seg_index = (rank + num_devices - (2 + tick)) % num_devices chunk_index = (seg_index * num_subchunks) + s chunks_by_dev[d][chunk_index] = new_partial_reductions[chunk_index] return chunks_by_dev, split_pad_len def _apply_unary_to_chunks(f, chunks_by_dev): """Apply a unary op to each tensor in chunks_by_dev, on same device. Args: f: a unary function over T `tf.Tensor`. chunks_by_dev: list of lists of T `tf.Tensor`. Returns: new list of lists of T `tf.Tensor` with the same structure as chunks_by_dev containing the derived tensors. """ output = [] for x in chunks_by_dev: with ops.colocate_with(x[0]): output.append([f(t) for t in x]) return output def _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev): """Construct subgraph for second (scatter) pass of ring all-reduce. Args: pred_by_s_d: as produced by _ring_permutations rank_by_s_d: as produced by _ring_permutations chunks_by_dev: list of list of T `tf.Tensor` indexed by ints (device, chunk) Raises: ValueError: chunks_by_dev is not well-formed Returns: list of T `tf.Tensor` which are the fully reduced tensors, one at each device corresponding to the outer dimension of chunks_by_dev. 
""" num_devices = len(chunks_by_dev) num_chunks = len(chunks_by_dev[0]) if 0 != num_chunks % num_devices: raise ValueError( "Expect number of chunks per device to be divisible by num_devices") num_subchunks = int(num_chunks / num_devices) num_ticks = num_devices - 1 for tick in range(0, num_ticks): passed_values = [None for _ in range(0, num_chunks)] for d in range(0, num_devices): with ops.colocate_with(chunks_by_dev[d][0]): for s in range(0, num_subchunks): rank = rank_by_s_d[s][d] seg_index = (rank + num_devices - (1 + tick)) % num_devices pred_dev = pred_by_s_d[s][d] chunk_index = (seg_index * num_subchunks) + s passed_values[chunk_index] = array_ops.identity( chunks_by_dev[pred_dev][chunk_index]) for d in range(0, num_devices): for s in range(0, num_subchunks): rank = rank_by_s_d[s][d] seg_index = (rank + num_devices - (1 + tick)) % num_devices chunk_index = (seg_index * num_subchunks) + s chunks_by_dev[d][chunk_index] = passed_values[chunk_index] # Join chunks at each device. output = [] for x in chunks_by_dev: with ops.colocate_with(x[0]): output.append(array_ops.concat(x, 0)) return output def build_recursive_hd_all_reduce(input_tensors, red_op, un_op=None): """Construct a subgraph for recursive halving-doubling all-reduce. The recursive halving-doubling algorithm is described in http://www.mcs.anl.gov/~thakur/papers/ijhpca-coll.pdf The concept is to arrange the participating n devices in a linear sequence where devices exchange data pairwise with one other device in each round. During the gather phase there are lg(n) rounds where devices exchange increasingly smaller sub-tensors with another device at increasingly greater distances, until at the top each device has 1/n of the fully reduced values. During the scatter phase each device exchanges its fully reduced sub-tensor (which doubles in length at each round) with one other device at increasingly smaller distances until each device has all of the fully reduced values. Note: this preliminary version requires that len(input_tensors) be a power of 2. TODO(tucker): relax this restriction. Also, the number of elements in each tensor must be divisible by 2^h where h is the number of hops in each phase. This will also be relaxed in the future with edge-case specific logic. Args: input_tensors: list of T `tf.Tensor` to be elementwise reduced. red_op: a binary elementwise reduction Op. un_op: an optional unary elementwise Op to apply to reduced values. Returns: list of T `tf.Tensor` which are the fully reduced tensors, one at each device of input_tensors. Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times. """ devices = [t.device for t in input_tensors] input_tensors, shape = _flatten_tensors(input_tensors) reduced_shards = _build_recursive_hd_gather(input_tensors, devices, red_op) if un_op: reduced_shards = [un_op(t) for t in reduced_shards] output_tensors = _build_recursive_hd_scatter(reduced_shards, devices) if len(shape) != 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors def _build_recursive_hd_gather(input_tensors, devices, red_op): """Construct the gather phase of recursive halving-doubling all-reduce. Args: input_tensors: list of T `tf.Tensor` to be elementwise reduced. devices: a list of strings naming the devices hosting input_tensors, which will also be used to host the (partial) reduction values. red_op: a binary elementwise reduction Op. Returns: list of T `tf.Tensor` which are the fully reduced tensor shards. 
Raises: ValueError: num_devices not a power of 2, or tensor len not divisible by 2 the proper number of times. """ num_devices = len(devices) num_hops = int(math.log(num_devices, 2)) if num_devices != (2 ** num_hops): raise ValueError("num_devices must be a power of 2") chunks = input_tensors for h in range(0, num_hops): span = 2 ** h group_size = span * 2 new_chunks = [[] for _ in devices] for d in range(0, num_devices): if (d % group_size) >= (group_size / 2): # skip right half of a pair continue left_dev = devices[d] right_dev = devices[d + span] left_split = array_ops.split(chunks[d], 2) right_split = array_ops.split(chunks[d+span], 2) with ops.device(left_dev): new_chunks[d] = red_op(left_split[0], right_split[0]) with ops.device(right_dev): new_chunks[d + span] = red_op(left_split[1], right_split[1]) chunks = new_chunks return chunks def _build_recursive_hd_scatter(input_tensors, devices): """Construct the scatter phase of recursive halving-doublng all-reduce. Args: input_tensors: list of T `tf.Tensor` that are fully-reduced shards. devices: a list of strings naming the devices on which the reconstituted full tensors should be placed. Returns: list of T `tf.Tensor` which are the fully reduced tensors. """ num_devices = len(devices) num_hops = int(math.log(num_devices, 2)) assert num_devices == (2 ** num_hops), "num_devices must be a power of 2" chunks = input_tensors for h in reversed(range(0, num_hops)): span = 2 ** h group_size = span * 2 new_chunks = [[] for _ in devices] for d in range(0, num_devices): if (d % group_size) >= (group_size / 2): # skip right half of a pair continue left_idx = d right_idx = d + span left_dev = devices[left_idx] right_dev = devices[right_idx] with ops.device(left_dev): new_chunks[left_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0) with ops.device(right_dev): new_chunks[right_idx] = array_ops.concat([chunks[left_idx], chunks[right_idx]], 0) chunks = new_chunks return chunks def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None): """Construct a subgraph for shuffle all-reduce. Shuffle reduce is essentially the algorithm implemented when using parameter servers. Suppose tensor length is n, there are d devices and g gather shards. Each device sends a n/g length sub-tensor to each gather shard. The gather shards perform a reduction across d fragments, then broadcast the result back to each device. The devices then join the g fully reduced fragments they receive from the shards. The gather shards could perform d-1 pairwise reductions, or one d-way reduction. The first is better where reduction Op time is low compared to transmission time, the second better in the other case. Args: input_tensors: list of T @(tf.Tensor} values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: an n-array elementwise reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of T `tf.Tensor` which are the fully reduced tensors. """ input_tensors, shape = _flatten_tensors(input_tensors) dst_devices = [t.device for t in input_tensors] reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op) output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices) if len(shape) != 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None): """Construct the gather (concentrate and reduce) phase of shuffle all-reduce. 
Args: input_tensors: list of T @(tf.Tensor} values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: the binary reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of T `tf.Tensor` which are the fully reduced shards. Raises: ValueError: inputs not well-formed. """ num_source_devices = len(input_tensors) num_gather_devices = len(gather_devices) shape = input_tensors[0].shape if len(shape) != 1: raise ValueError("input_tensors must be 1D") shards_by_source = [] for d in range(0, num_source_devices): with ops.colocate_with(input_tensors[d]): shards_by_source.append( _ragged_split(input_tensors[d], num_gather_devices)) reduced_shards = [] for d in range(0, num_gather_devices): with ops.device(gather_devices[d]): values = [s[d] for s in shards_by_source] red_shard = red_op(values) if un_op: red_shard = un_op(red_shard) reduced_shards.append(red_shard) return reduced_shards def _build_shuffle_scatter(reduced_shards, dst_devices): """Build the scatter phase of shuffle all-reduce. Args: reduced_shards: list of T @(tf.Tensor} fully reduced shards dst_devices: list of names of devices at which the fully-reduced value should be reconstituted. Returns: list of T `tf.Tensor` scattered tensors. """ num_devices = len(dst_devices) out_tensors = [] for d in range(0, num_devices): with ops.device(dst_devices[d]): out_tensors.append(array_ops.concat(reduced_shards, 0)) return out_tensors def _split_by_task(devices, values): """Partition devices and values by common task. Args: devices: list of device name strings values: list of T `tf.tensor` of same length as devices. Returns: (per_task_devices, per_task_values) where both values are lists of lists with isomorphic structure: the outer list is indexed by task, and the inner list has length of the number of values belonging to that task. per_task_devices contains the specific devices to which the values are local, and per_task_values contains the corresponding values. Raises: ValueError: devices must be same length as values. """ num_devices = len(devices) if num_devices != len(values): raise ValueError("len(devices) must equal len(values)") per_task_devices = collections.OrderedDict() per_task_values = collections.OrderedDict() for d in range(num_devices): d_spec = device_lib.DeviceSpec.from_string(devices[d]) if not hasattr(d_spec, "task") or d_spec.task is None: assert False, "failed to parse device %s" % devices[d] index = (d_spec.job or "localhost", d_spec.replica or 0, d_spec.task) if index not in per_task_devices: per_task_devices[index] = [] per_task_values[index] = [] per_task_devices[index].append(devices[d]) per_task_values[index].append(values[d]) return (list(per_task_devices.values()), list(per_task_values.values())) def build_nccl_all_reduce(input_tensors, red_op, un_op=None): """Build a subgraph that does one full all-reduce, using NCCL. Args: input_tensors: list of T `tf.Tensor` of same-shape and type values to be reduced. red_op: binary elementwise reduction operator. Must be one of {tf.add} un_op: optional unary elementwise Op to apply to fully-reduce values. Returns: list of T `tf.Tensor` of reduced values. Raises: ValueError: red_op not supported. 
""" if red_op == math_ops.add: output_tensors = nccl_ops.all_sum(input_tensors) else: raise ValueError("red_op not supported by NCCL all-reduce: ", red_op) if un_op: un_op_wrapped = [] for t in output_tensors: with ops.colocate_with(t): un_op_wrapped.append(un_op(t)) output_tensors = un_op_wrapped return output_tensors def _build_nccl_hybrid(input_tensors, red_op, upper_level_f): """Construct a subgraph for NCCL hybrid all-reduce. Args: input_tensors: list of T `tf.Tensor` of same-shape and type values to be reduced. red_op: binary elementwise reduction operator. upper_level_f: function for reducing one value per worker, across workers. Returns: list of T `tf.Tensor` of reduced values. Raises: ValueError: inputs not well-formed. """ input_tensors, shape = _flatten_tensors(input_tensors) devices = [t.device for t in input_tensors] per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors) num_workers = len(per_worker_devices) up_values = [None for w in range(0, num_workers)] up_devices = up_values[:] down_values = up_values[:] # First stage: reduce within each worker using NCCL for w in range(0, num_workers): worker_values = build_nccl_all_reduce(per_worker_values[w], red_op) # NOTE: these reductions will not run to completion unless # every output value is used. Since we only need one, we # need to put control dependencies on the rest. with ops.control_dependencies(worker_values): with ops.device(worker_values[0].device): up_values[w] = array_ops.identity(worker_values[0]) up_devices[w] = per_worker_devices[w][0] # Second stage: Apply upper_level_f to reduce across first device at # each worker level_2_output = upper_level_f(up_values) # Third stage: propagate within each worker using NCCL Broadcast for w in range(0, num_workers): dst_tensors = [] with ops.device(per_worker_devices[w][0]): broadcast_src = nccl_ops.broadcast(array_ops.identity(level_2_output[w])) for d in per_worker_devices[w]: with ops.device(d): dst_tensors.append(array_ops.identity(broadcast_src)) down_values[w] = dst_tensors output_tensors = [v for sublist in down_values for v in sublist] if len(shape) != 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors def _reduce_non_singleton(input_tensors, red_f, un_op): """If len(input_tensors) > 1, apply red_f, else apply un_op.""" if len(input_tensors) > 1: return red_f(input_tensors) else: if not un_op: return input_tensors output_tensors = [] for t in input_tensors: with ops.colocate_with(t): output_tensors.append(un_op(t)) return output_tensors def build_nccl_then_ring(input_tensors, subdiv, red_op, un_op=None): """Construct hybrid of NCCL within workers, Ring across workers.""" def upper_builder(y): return build_ring_all_reduce(y, len(y), subdiv, [0], red_op, un_op) def upper_level_f(x): return _reduce_non_singleton(x, upper_builder, un_op) return _build_nccl_hybrid(input_tensors, red_op, upper_level_f) def build_nccl_then_recursive_hd(input_tensors, red_op, un_op=None): """Construct hybrid of NCCL within workers, Recursive-HD across workers.""" upper_level_f = lambda x: build_recursive_hd_all_reduce(x, red_op, un_op) return _build_nccl_hybrid(input_tensors, red_op, upper_level_f) def build_nccl_then_shuffle(input_tensors, gather_devices, nccl_red_op, shuffle_red_op, un_op=None): """Construct hybrid of NCCL within workers, Shuffle across workers.""" def upper_level_f(x): return build_shuffle_all_reduce(x, gather_devices, shuffle_red_op, un_op) return _build_nccl_hybrid(input_tensors, nccl_red_op, upper_level_f) def 
_build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f): """Construct a subgraph for Shuffle hybrid all-reduce. Args: input_tensors: list of T `tf.Tensor` of same-shape and type values to be reduced. gather_devices: list of device names on which to host gather shards. red_op: binary elementwise reduction operator. upper_level_f: function for reducing one value per worker, across workers. Returns: list of T `tf.Tensor` of reduced values. Raises: ValueError: inputs not well-formed. """ input_tensors, shape = _flatten_tensors(input_tensors) # First stage, reduce across each worker using gather_devices. devices = [t.device for t in input_tensors] per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors) num_workers = len(per_worker_devices) up_values = [] if len(gather_devices) != num_workers: raise ValueError("For shuffle hybrid, gather_devices must contain one " "device per worker. ") for w in range(0, num_workers): reduced_shards = _build_shuffle_gather( per_worker_values[w], [gather_devices[w]], red_op) up_values.append(reduced_shards[0]) # Second stage, apply upper_level_f. level_2_output = upper_level_f(up_values) # Third stage, apply shuffle scatter at each worker. output_tensors = [] for w in range(0, num_workers): output_tensors += _build_shuffle_scatter( [level_2_output[w]], per_worker_devices[w]) if len(shape) != 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors def build_shuffle_then_ring(input_tensors, gather_devices, subdiv, red_n_op, red_op, un_op=None): """Construct hybrid of Shuffle within workers, Ring across workers.""" def upper_builder(tensors): return build_ring_all_reduce(tensors, len(tensors), subdiv, [0], red_op, un_op) def upper_level_f(tensors): return _reduce_non_singleton(tensors, upper_builder, un_op) return _build_shuffle_hybrid( input_tensors, gather_devices, red_n_op, upper_level_f) def build_shuffle_then_shuffle(input_tensors, first_gather_devices, second_gather_devices, red_op, un_op=None): """Construct hybrid of Shuffle within workers, Shuffle across workers.""" def upper_builder(tensors): return build_shuffle_all_reduce(tensors, second_gather_devices, red_op, un_op) def upper_level_f(tensors): return _reduce_non_singleton(tensors, upper_builder, un_op) return _build_shuffle_hybrid( input_tensors, first_gather_devices, red_op, upper_level_f)
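

# Illustrative graph-mode sketch of `build_ring_all_reduce`: four equal-shape
# tensors, one worker, one subchunk, summed elementwise with `math_ops.add`.
# The CPU device strings are placeholders (multiple CPU devices require a
# suitably configured session), so treat this as a shape-level sketch rather
# than a ready-to-run snippet:
#
#   tensors = []
#   for i in range(4):
#     with ops.device("/cpu:%d" % i):
#       tensors.append(array_ops.ones([8]) * float(i))
#   reduced = build_ring_all_reduce(
#       tensors, num_workers=1, num_subchunks=1, gpu_perm=[0, 1, 2, 3],
#       red_op=math_ops.add)
#   # After evaluation, every output equals the elementwise sum 0+1+2+3 = 6.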
tensorflow-master
tensorflow/python/distribute/all_reduce.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base testing class for strategies that require multiple nodes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import copy import json import os import six import subprocess import sys import threading import numpy as np _portpicker_import_error = None try: import portpicker # pylint: disable=g-import-not-at-top except ImportError as _error: # pylint: disable=invalid-name _portpicker_import_error = _error portpicker = None # pylint: disable=g-import-not-at-top from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import coordinator from tensorflow.python.training import server_lib from tensorflow.python.util import nest original_run_std_server = dc._run_std_server # pylint: disable=protected-access ASSIGNED_PORTS = set() lock = threading.Lock() def pick_unused_port(): """Returns an unused and unassigned local port.""" if _portpicker_import_error: raise _portpicker_import_error # pylint: disable=raising-bad-type global ASSIGNED_PORTS with lock: while True: port = portpicker.pick_unused_port() if port > 10000 and port not in ASSIGNED_PORTS: ASSIGNED_PORTS.add(port) logging.info('Using local port %r', port) return port def _create_cluster(num_workers, num_ps, has_chief=False, has_eval=False, protocol='grpc', worker_config=None, ps_config=None, eval_config=None): """Creates and starts local servers and returns the cluster_spec dict.""" if _portpicker_import_error: raise _portpicker_import_error # pylint: disable=raising-bad-type worker_ports = [pick_unused_port() for _ in range(num_workers)] ps_ports = [pick_unused_port() for _ in range(num_ps)] cluster_dict = {} if num_workers > 0: cluster_dict['worker'] = ['localhost:%s' % port for port in worker_ports] if num_ps > 0: cluster_dict['ps'] = ['localhost:%s' % port for port in ps_ports] if has_eval: cluster_dict['evaluator'] = ['localhost:%s' % pick_unused_port()] if has_chief: cluster_dict['chief'] = ['localhost:%s' % pick_unused_port()] cs = server_lib.ClusterSpec(cluster_dict) for i in range(num_workers): server_lib.Server( cs, job_name='worker', protocol=protocol, task_index=i, config=worker_config, start=True) for i in range(num_ps): server_lib.Server( cs, job_name='ps', protocol=protocol, task_index=i, config=ps_config, start=True) if has_chief: server_lib.Server( cs, job_name='chief', protocol=protocol, task_index=0, 
config=worker_config, start=True) if has_eval: server_lib.Server( cs, job_name='evaluator', protocol=protocol, task_index=0, config=eval_config, start=True) return cluster_dict def create_in_process_cluster(num_workers, num_ps, has_chief=False, has_eval=False): """Create an in-process cluster that consists of only standard server.""" # Leave some memory for cuda runtime. gpu_mem_frac = 0.7 / (num_workers + int(has_chief) + int(has_eval)) worker_config = config_pb2.ConfigProto() worker_config.gpu_options.per_process_gpu_memory_fraction = gpu_mem_frac # Enable collective ops which has no impact on non-collective ops. # TODO(yuefengz, tucker): removing this after we move the initialization of # collective mgr to the session level. if has_chief: worker_config.experimental.collective_group_leader = ( '/job:chief/replica:0/task:0') else: worker_config.experimental.collective_group_leader = ( '/job:worker/replica:0/task:0') ps_config = config_pb2.ConfigProto() ps_config.device_count['GPU'] = 0 eval_config = config_pb2.ConfigProto() eval_config.experimental.collective_group_leader = '' # Create in-process servers. Once an in-process tensorflow server is created, # there is no way to terminate it. So we create one cluster per test process. # We could've started the server in another process, we could then kill that # process to terminate the server. The reasons why we don't want multiple # processes are # 1) it is more difficult to manage these processes; # 2) there is something global in CUDA such that if we initialize CUDA in the # parent process, the child process cannot initialize it again and thus cannot # use GPUs (https://stackoverflow.com/questions/22950047). return _create_cluster( num_workers, num_ps=num_ps, has_chief=has_chief, has_eval=has_eval, worker_config=worker_config, ps_config=ps_config, eval_config=eval_config, protocol='grpc') def create_cluster_spec(test_obj, has_chief=False, num_workers=1, num_ps=0, has_eval=False): """Create a cluster spec with tasks with unused local ports.""" if _portpicker_import_error: raise _portpicker_import_error # pylint: disable=raising-bad-type cluster_spec = {} try: if has_chief: cluster_spec['chief'] = ['localhost:%s' % pick_unused_port()] if num_workers: cluster_spec['worker'] = [ 'localhost:%s' % pick_unused_port() for _ in range(num_workers) ] if num_ps: cluster_spec['ps'] = [ 'localhost:%s' % pick_unused_port() for _ in range(num_ps) ] if has_eval: cluster_spec['evaluator'] = ['localhost:%s' % pick_unused_port()] except portpicker.NoFreePortFoundError: test_obj.skipTest('Flakes in portpicker library do not represent ' 'TensorFlow errors.') return cluster_spec @contextlib.contextmanager def skip_if_grpc_server_cant_be_started(test_obj): try: yield except errors.UnknownError as e: if 'Could not start gRPC server' in e.message: test_obj.skipTest('Cannot start std servers.') else: raise class MultiWorkerTestBase(test.TestCase): """Base class for testing multi node strategy and dataset.""" @classmethod def setUpClass(cls): """Create a local cluster with 2 workers.""" cls._cluster_spec = create_in_process_cluster(num_workers=2, num_ps=1) cls._default_target = 'grpc://' + cls._cluster_spec['worker'][0] def setUp(self): # We only cache the session in one test because another test may have a # different session config or master target. 
self._thread_local = threading.local() self._thread_local.cached_session = None self._result = 0 self._lock = threading.Lock() @contextlib.contextmanager def session(self, graph=None, config=None, target=None): """Create a test session with the master target set to the testing cluster. Creates a test session that connects to the local testing cluster. Args: graph: Optional graph to use during the returned session. config: An optional config_pb2.ConfigProto to use to configure the session. target: the target of the session to connect to. Yields: A Session object that should be used as a context manager to surround the graph building and execution code in a test case. """ config = self._create_config(config) if target is None: target = self._default_target with session.Session(graph=graph, config=config, target=target) as sess: yield sess @contextlib.contextmanager # TODO(b/117573461): Overwrite self.evaluate() to use this function. def cached_session(self, graph=None, config=None, target=None): """Create a test session with the master target set to the testing cluster. Creates a test session that connects to the local testing cluster. The session is only created once per test and then reused. Args: graph: Optional graph to use during the returned session. config: An optional config_pb2.ConfigProto to use to configure the session. target: the target of the session to connect to. Yields: A Session object that should be used as a context manager to surround the graph building and execution code in a test case. Note that the session will live until the end of the test. """ config = self._create_config(config) if target is None: target = self._default_target if getattr(self._thread_local, 'cached_session', None) is None: self._thread_local.cached_session = session.Session( graph=None, config=config, target=target) sess = self._thread_local.cached_session with sess.graph.as_default(), sess.as_default(): yield sess def _create_config(self, config): if config is None: config = config_pb2.ConfigProto(allow_soft_placement=True) else: config = copy.deepcopy(config) # Don't perform optimizations for tests so we don't inadvertently run # GPU ops on the CPU. config.graph_options.optimizer_options.opt_level = -1 config.graph_options.rewrite_options.constant_folding = ( rewriter_config_pb2.RewriterConfig.OFF) return config def _run_client(self, client_fn, task_type, task_id, num_gpus, eager_mode, *args, **kwargs): if eager_mode: with context.eager_mode(): result = client_fn(task_type, task_id, num_gpus, *args, **kwargs) else: with context.graph_mode(): result = client_fn(task_type, task_id, num_gpus, *args, **kwargs) if np.all(result): with self._lock: self._result += 1 def _run_between_graph_clients(self, client_fn, cluster_spec, num_gpus, *args, **kwargs): """Runs several clients for between-graph replication. Args: client_fn: a function that accepts `task_type`, `task_id` and `num_gpus` and returns True if it succeeds. cluster_spec: a dict specifying jobs in a cluster. num_gpus: number of GPUs per worker. *args: will be passed to `client_fn`. **kwargs: will be passed to `client_fn`.
""" threads = [] for task_type in ['chief', 'worker']: for task_id in range(len(cluster_spec.get(task_type, []))): t = threading.Thread( target=self._run_client, args=(client_fn, task_type, task_id, num_gpus, context.executing_eagerly()) + args, kwargs=kwargs) t.start() threads.append(t) for t in threads: t.join() self.assertEqual(self._result, len(threads)) class MockOsEnv(collections.Mapping): """A class that allows per-thread TF_CONFIG.""" def __init__(self, *args): self._dict = dict() self._thread_local = threading.local() super(MockOsEnv, self).__init__(*args) def get(self, key, default=None): if not hasattr(self._thread_local, 'dict'): self._thread_local.dict = dict() if key == 'TF_CONFIG': return dict.get(self._thread_local.dict, key, default) else: return dict.get(self._dict, key, default) def __getitem__(self, key): if not hasattr(self._thread_local, 'dict'): self._thread_local.dict = dict() if key == 'TF_CONFIG': return dict.__getitem__(self._thread_local.dict, key) else: return dict.__getitem__(self._dict, key) def __setitem__(self, key, val): if not hasattr(self._thread_local, 'dict'): self._thread_local.dict = dict() if key == 'TF_CONFIG': return dict.__setitem__(self._thread_local.dict, key, val) else: return dict.__setitem__(self._dict, key, val) def __iter__(self): if not hasattr(self._thread_local, 'dict'): self._thread_local.dict = dict() for x in self._thread_local.dict: yield x for x in self._dict: yield x def __len__(self): if not hasattr(self._thread_local, 'dict'): self._thread_local.dict = dict() return self._thread_local.dict.__len__() + self._dict.__len__() class IndependentWorkerTestBase(test.TestCase): """Testing infra for independent workers.""" def _make_mock_run_std_server(self): def _mock_run_std_server(*args, **kwargs): """Returns the std server once all threads have started it.""" with skip_if_grpc_server_cant_be_started(self): ret = original_run_std_server(*args, **kwargs) # Wait for all std servers to be brought up in order to reduce the chance # of remote sessions taking local ports that have been assigned to std # servers. Only call this barrier the first time this function is run for # each thread. if not getattr(self._thread_local, 'server_started', False): self._barrier.wait() self._thread_local.server_started = True return ret return _mock_run_std_server def setUp(self): self._mock_os_env = MockOsEnv() self._mock_context = test.mock.patch.object(os, 'environ', self._mock_os_env) self._coord = coordinator.Coordinator() super(IndependentWorkerTestBase, self).setUp() self._mock_context.__enter__() # threading local object to be shared by all threads self._thread_local = threading.local() def tearDown(self): self._mock_context.__exit__(None, None, None) super(IndependentWorkerTestBase, self).tearDown() def _task_thread(self, task_fn, tf_config, executing_eagerly, *args, **kwargs): with self._coord.stop_on_exception(): os.environ['TF_CONFIG'] = json.dumps(tf_config) # Force the new thread simulating a worker to run in the same context # mode as the parent thread does. if executing_eagerly: with context.eager_mode(): task_fn(*args, **kwargs) else: with ops.Graph().as_default(), context.graph_mode(): task_fn(*args, **kwargs) def _run_task_in_thread(self, task_fn, cluster_spec, task_type, task_id, *args, **kwargs): """Run tasks in a thread. If `tf_config` is provided, use it for the new thread; if not, construct one from `cluster_spec`, `task_type`, and `task_id`, and provide it to the new thread to be set as `TF_CONFIG` environment. 
Arguments: task_fn: The function to run in the new thread. cluster_spec: The cluster spec. task_type: The task type. task_id: The task id. *args: Additional positional arguments to provide to the thread's task_fn. **kwargs: Additional keyword arguments to provide to the thread's task_fn. If `tf_config` is provided, that dict will be used for the TF_CONFIG of the new thread. Returns: The thread that was started. """ tf_config = kwargs.pop('tf_config', None) if tf_config is None: if task_type: tf_config = { 'cluster': cluster_spec, 'task': { 'type': task_type, 'index': task_id } } else: tf_config = { 'cluster': cluster_spec, } t = threading.Thread( target=self._task_thread, args=(task_fn, tf_config, context.executing_eagerly()) + args, kwargs=kwargs) t.start() return t def run_multiple_tasks_in_threads(self, task_fn, cluster_spec, *args, **kwargs): # The task_fn should create the std server by itself. threads = {} for task_type in cluster_spec.keys(): threads[task_type] = [] for task_id in range(len(cluster_spec[task_type])): t = self._run_task_in_thread(task_fn, cluster_spec, task_type, task_id, *args, **kwargs) threads[task_type].append(t) return threads def join_independent_workers(self, worker_threads): with skip_if_grpc_server_cant_be_started(self): self._coord.join(worker_threads) class MultiWorkerMultiProcessTest(test.TestCase): """Testing infra for independent workers using multiple processes.""" def _run_task_in_process(self, cmd_args, cluster_spec, task_type, task_id): env = os.environ.copy() env['TF_CONFIG'] = json.dumps({ 'cluster': cluster_spec, 'task': { 'type': task_type, 'index': task_id } }) return subprocess.Popen( cmd_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) def run_multiple_tasks_in_processes(self, cmd_args, cluster_spec): """Run `cmd_args` in a process for each task in `cluster_spec`.""" processes = {} for task_type in cluster_spec.keys(): processes[task_type] = [] for task_id in range(len(cluster_spec[task_type])): p = self._run_task_in_process(cmd_args, cluster_spec, task_type, task_id) processes[task_type].append(p) return processes def join_independent_workers(self, worker_processes): return_codes = [] for p in nest.flatten(worker_processes): try: # Calling p.wait() will hang if we don't consume its output. p.communicate() except ValueError: # The output of the process may have been consumed, in which case # calling `p.communicate()` will raise a ValueError. pass finally: return_codes.append(p.returncode) for return_code in return_codes: self.assertEqual(return_code, 0) def stream_stderr(self, processes, print_only_first=False): """Consume stderr of all processes and print to stdout. To reduce the amount of logging, the caller can set print_only_first to True. In that case, this function only prints stderr from the first process of each type. Arguments: processes: A dictionary from process type string -> list of processes. print_only_first: If true, only print output from the first process of each type.
""" def _stream_stderr_single_process(process, type_string, index, print_to_stdout): """Consume a single process's stderr and optionally print to stdout.""" while True: output = process.stderr.readline() if not output and process.poll() is not None: break if output and print_to_stdout: print('{}{} {}'.format(type_string, index, output.strip())) sys.stdout.flush() stream_threads = [] for process_type, process_list in six.iteritems(processes): for i in range(len(process_list)): print_to_stdout = (not print_only_first) or (i == 0) thread = threading.Thread( target=_stream_stderr_single_process, args=(process_list[i], process_type, i, print_to_stdout)) thread.start() stream_threads.append(thread) for thread in stream_threads: thread.join() def get_tf_config_task(): return json.loads(os.environ['TF_CONFIG'])['task'] def get_tf_config_cluster_spec(): return json.loads(os.environ['TF_CONFIG'])['cluster'] def get_task_type(): return get_tf_config_task()['type'] def get_task_index(): return get_tf_config_task()['index'] def is_chief(): return ('chief' not in get_tf_config_cluster_spec() and get_task_type() == 'worker' and get_task_index() == 0)
tensorflow-master
tensorflow/python/distribute/multi_worker_test_base.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cross_device_utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.distribute import combinations from tensorflow.python.distribute import cross_device_utils from tensorflow.python.distribute import device_util from tensorflow.python.distribute import values as value_lib from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops class IndexedSlicesUtilsTest(test.TestCase, parameterized.TestCase): def _assert_values_equal(self, left, right): self.assertAllEqual( self.evaluate(ops.convert_to_tensor(left)), self.evaluate(ops.convert_to_tensor(right))) @test_util.run_in_graph_and_eager_modes def testAggregateTensors(self): t0 = constant_op.constant([[1., 2.], [0, 0], [3., 4.]]) t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]]) total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]]) result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1]) self._assert_values_equal(total, result) @test_util.run_in_graph_and_eager_modes def testAggregateIndexedSlices(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], [7., 8.]])) total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]]) result = cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1]) self.assertIsInstance(result, ops.IndexedSlices) self._assert_values_equal(total, result) @test_util.run_in_graph_and_eager_modes def testDivideTensor(self): t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]]) n = 2 expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]]) result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n) self._assert_values_equal(expected, result) @test_util.run_in_graph_and_eager_modes def testDivideIndexedSlices(self): t = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) n = 2 expected = constant_op.constant([[0.5, 1.], [0, 0], [1.5, 2.]]) result = cross_device_utils.divide_by_n_tensors_or_indexed_slices(t, n) self.assertIsInstance(result, ops.IndexedSlices) self._assert_values_equal(expected, result) @test_util.run_in_graph_and_eager_modes def testIsIndexedSlices(self): t = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) self.assertTrue(cross_device_utils.contains_indexed_slices(t)) @test_util.run_in_graph_and_eager_modes def testContainsIndexedSlices_List(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], 
[7., 8.]])) self.assertTrue(cross_device_utils.contains_indexed_slices([t0, t1])) @test_util.run_in_graph_and_eager_modes def testContainsIndexedSlices_Tuple(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], [7., 8.]])) self.assertTrue(cross_device_utils.contains_indexed_slices((t0, t1))) @test_util.run_in_graph_and_eager_modes def testContainsIndexedSlices_PerReplica(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], [7., 8.]])) device_map = value_lib.ReplicaDeviceMap(("/gpu:0", "/cpu:0")) per_replica = value_lib.PerReplica(device_map, (t0, t1)) self.assertTrue(cross_device_utils.contains_indexed_slices(per_replica)) @combinations.generate(combinations.combine( mode=["graph", "eager"], required_gpus=1)) def testCopyTensor(self): with ops.device("/cpu:0"): t = constant_op.constant([[1., 2.], [0, 0], [3., 4.]]) destination = "/gpu:0" result = cross_device_utils.copy_tensor_or_indexed_slices_to_device( t, destination) self._assert_values_equal(t, result) self.assertEqual(device_util.resolve(destination), device_util.resolve(result.device)) @combinations.generate(combinations.combine( mode=["graph", "eager"], required_gpus=1)) def testCopyIndexedSlices(self): with ops.device("/cpu:0"): t = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) destination = "/gpu:0" result = cross_device_utils.copy_tensor_or_indexed_slices_to_device( t, destination) self.assertIsInstance(result, ops.IndexedSlices) self._assert_values_equal(t, result) self.assertEqual(device_util.resolve(destination), device_util.resolve(result.device)) if __name__ == "__main__": test.main()
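# A standalone sketch (illustrative only: the helper function below is
# hypothetical, but the API calls are the same ones exercised by the tests
# above). It reproduces testAggregateTensors outside the test harness:
# aggregation is an element-wise sum across the list of per-replica tensors.
def _example_aggregate_tensors():
  t0 = constant_op.constant([[1., 2.], [0., 0.], [3., 4.]])
  t1 = constant_op.constant([[0., 0.], [5., 6.], [7., 8.]])
  # Returns a tensor equal to [[1., 2.], [5., 6.], [10., 12.]].
  return cross_device_utils.aggregate_tensors_or_indexed_slices([t0, t1])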
tensorflow-master
tensorflow/python/distribute/cross_device_utils_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Various classes representing distributed inputs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import six from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import multi_device_iterator_ops from tensorflow.python.data.util import structure from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import input_ops from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.util import nest def get_distributed_dataset(dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Returns a wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. This is a common function that is used by all strategies to return the right tf.data.Dataset wrapped instance depending on the `dataset` argument type. Args: dataset: a tf.data.DatasetV1 or tf.data.DatasetV2 instance. input_workers: an InputWorkers object which specifies devices on which iterators should be created. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. Returns: A wrapped tf.data.DatasetV1 or tf.data.DatasetV2 instance. """ if isinstance(dataset, dataset_ops.DatasetV1): return DistributedDatasetV1( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) else: return DistributedDataset( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) class InputWorkers(object): """A 1-to-many mapping from input worker devices to compute devices.""" def __init__(self, device_map, worker_device_pairs=None, logical_device=0): """Initialize an `InputWorkers` object. 
Args: device_map: A `DeviceMap` with the computation devices fed by the input workers. worker_device_pairs: A sequence of pairs: `(input device, a tuple of compute devices fed by that input device)`. logical_device: The logical device of `device_map` to feed. """ self._device_map = device_map self._logical_device = logical_device if worker_device_pairs is None: worker_device_pairs = (( device_util.canonicalize("/device:CPU:0"), device_map.logical_to_actual_devices(logical_device)),) self._input_worker_devices = tuple(d for d, _ in worker_device_pairs) self._fed_devices = tuple(tuple(device_util.canonicalize(d) for d in f) for _, f in worker_device_pairs) flattened = tuple(d for l in self._fed_devices for d in l) assert (flattened == device_map.logical_to_actual_devices(logical_device)), ( "flattened: %s logical device %d: %s" % (flattened, logical_device, device_map.logical_to_actual_devices(logical_device))) @property def device_map(self): return self._device_map @property def logical_device(self): return self._logical_device @property def num_workers(self): return len(self._input_worker_devices) @property def worker_devices(self): return self._input_worker_devices def compute_devices_for_worker(self, worker_index): return self._fed_devices[worker_index] def __repr__(self): devices = self.worker_devices debug_repr = ",\n".join(" %d %s: %s" % (i, devices[i], self._fed_devices[i]) for i in range(len(devices))) return "%s:{\n%s\n device_map: %s}" % ( self.__class__.__name__, debug_repr, self._device_map) def _get_next_as_optional(iterator, strategy, name=None): """Returns an empty dataset indicator and the next input from the iterator.""" replicas = [] worker_has_values = [] worker_devices = [] for i, worker in enumerate(iterator._input_workers.worker_devices): # pylint: disable=protected-access if name is not None: d = tf_device.DeviceSpec.from_string(worker) new_name = "%s_%s_%d" % (name, d.job, d.task) else: new_name = None with ops.device(worker): worker_has_value, next_element = ( iterator._iterators[i].get_next_as_list(new_name)) # pylint: disable=protected-access # Collective all-reduce requires explicit devices for inputs. with ops.device("/cpu:0"): # Converting to integers for all-reduce. worker_has_value = math_ops.cast(worker_has_value, dtypes.int32) worker_devices.append(worker_has_value.device) worker_has_values.append(worker_has_value) # Make `replicas` a flat list of values across all replicas. replicas.append(next_element) # Run an all-reduce to see whether any worker has values. # TODO(b/131423105): we should be able to short-cut the all-reduce in some # cases. if getattr(strategy.extended, "_support_per_replica_values", True): worker_has_values = values.PerReplica( values.WorkerDeviceMap( worker_devices, num_replicas_per_worker=len( strategy.extended._input_workers._input_worker_devices)), # pylint: disable=protected-access worker_has_values) global_has_value = strategy.reduce( reduce_util.ReduceOp.SUM, worker_has_values, axis=None) else: assert len(worker_has_values) == 1 global_has_value = worker_has_values[0] global_has_value = array_ops.reshape( math_ops.cast(global_has_value, dtypes.bool), []) return global_has_value, replicas class DistributedIterator(object): """Common implementation for all input iterators.""" def __init__(self, input_workers, iterators, strategy): # TODO(b/133073708): we currently need a flag to control the usage because # there is a performance difference between get_next() and # get_next_as_optional().
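# The flag defaults to False, so the cheaper get_next() path below is used # unless a strategy explicitly opts in to get_next_as_optional().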
self._enable_get_next_as_optional = getattr( strategy.extended, "experimental_enable_get_next_as_optional", False) assert isinstance(input_workers, InputWorkers) if not input_workers.worker_devices: raise ValueError("Should have at least one worker for input iterator.") self._iterators = iterators self._input_workers = input_workers self._strategy = strategy def next(self): return self.__next__() def __next__(self): try: return self.get_next() except errors.OutOfRangeError: raise StopIteration def get_next(self, name=None): """Returns the next input from the iterator for all replicas.""" if not self._enable_get_next_as_optional: replicas = [] for i, worker in enumerate(self._input_workers.worker_devices): if name is not None: d = tf_device.DeviceSpec.from_string(worker) new_name = "%s_%s_%d" % (name, d.job, d.task) else: new_name = None with ops.device(worker): # Make `replicas` a flat list of values across all replicas. replicas.extend( self._iterators[i].get_next_as_list_deprecated(new_name)) return values.regroup(self._input_workers.device_map, replicas) out_of_range_replicas = [] def out_of_range_fn(worker_index, device): """This function will throw an OutOfRange error.""" # This is only called when there is no data left, so calling get_next() # here will trigger an OutOfRange error. data = self._iterators[worker_index].get_next(device) out_of_range_replicas.append(data) return data global_has_value, replicas = _get_next_as_optional(self, self._strategy) results = [] for i, worker in enumerate(self._input_workers.worker_devices): with ops.device(worker): devices = self._input_workers.compute_devices_for_worker(i) for j, device in enumerate(devices): with ops.device(device): # pylint: disable=undefined-loop-variable # pylint: disable=cell-var-from-loop # It is fine for the lambda to capture variables from the loop as # the lambda is executed in the loop as well. result = control_flow_ops.cond(global_has_value, lambda: replicas[i][j], lambda: out_of_range_fn(i, device)) # pylint: enable=cell-var-from-loop # pylint: enable=undefined-loop-variable results.append(result) replicas = results # Some dimensions in `replicas` will become unknown after we conditionally # return the real tensors or the dummy tensors. We fix the input shapes by # using the shapes from `out_of_range_replicas` because it calls get_next() # internally. flattened_replicas = nest.flatten(replicas) for i, replica_data in enumerate(nest.flatten(out_of_range_replicas)): flattened_replicas[i].set_shape(replica_data.get_shape()) replicas = nest.pack_sequence_as(replicas, flattened_replicas) return values.regroup(self._input_workers.device_map, replicas) # We need a private initializer method for re-initializing multidevice # iterators when used with Keras training loops. If we don't reinitialize the # iterator we run into memory leak issues (b/123315763). @property def _initializer(self): init_ops = [] for it in self._iterators: init_ops.extend(it.initialize()) return control_flow_ops.group(init_ops) class DistributedIteratorV1(DistributedIterator): """Input iterator for tf.data.DatasetV1.""" # TODO(anjalisridhar): Move to using `initializer` instead to be consistent # with tf.data iterator APIs. def initialize(self): """Initialize the underlying iterators. Returns: A list of any initializer ops that should be run. """ return super(DistributedIteratorV1, self)._initializer @property def initializer(self): return self.initialize() # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs.
@property def output_classes(self): return self._iterators[0].output_classes # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. @property def output_shapes(self): return self._iterators[0].output_shapes # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. @property def output_types(self): return self._iterators[0].output_types # TODO(priyag): Remove when we switch to using `MultiDeviceIterator` for TPUs. def get_iterator(self, worker): for i, w in enumerate(self._input_workers.worker_devices): if worker == w: return self._iterators[i] return None class _IterableInput(object): """Base class for iterable inputs for distribution strategies.""" def __init__(self, input_workers): assert isinstance(input_workers, InputWorkers) self._input_workers = input_workers def __iter__(self): raise NotImplementedError("must be implemented in descendants") def _autograph_for_loop(self, extra_test, body, init_state): """Overload of the for..in statement that iterates over the input.""" if extra_test is not None: raise NotImplementedError( "break and return statements are not yet supported in " "for ... in distributed input loops.") def reduce_body(state, iterate): new_state = body(iterate, *state) return new_state if init_state: return self.reduce(init_state, reduce_body) # TODO(anjalisridhar): This is a workaround for Dataset.reduce not allowing # empty state tensors - create a dummy state variable that remains unused. # Identify if we need this workaround and remove if unnecessary. def reduce_body_with_dummy_state(state, iterate): reduce_body((), iterate) return state self.reduce((constant_op.constant(0),), reduce_body_with_dummy_state) return () def reduce(self, initial_state, reduce_fn): """Execute a `reduce_fn` over all the elements of the input.""" iterator = iter(self) has_data, data = _get_next_as_optional(iterator, self._strategy) def cond(has_data, data, state): del data, state # Unused. return has_data def loop_body(has_data, data, state): """Executes `reduce_fn` in a loop until the dataset is empty.""" del has_data # Unused. # data is a list of lists here, where each inner list corresponds to one # worker. # TODO(b/130570614): Add support for the multiworker and TPU pods use # case. if self._input_workers.num_workers == 1: data = data[0] else: raise ValueError("Dataset iteration within a tf.function is" " not supported for multiple workers.") per_replica_data = values.regroup(self._input_workers.device_map, data) state = reduce_fn(state, per_replica_data) has_data, data = _get_next_as_optional(iterator, self._strategy) return has_data, data, state has_data, data, final_state = control_flow_ops.while_loop( cond, loop_body, [has_data, data, initial_state], parallel_iterations=1) return final_state class DistributedDataset(_IterableInput): """Wrapped tf.data.DatasetV2 that supports prefetching to multiple devices.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Distribute the dataset on all workers. If `split_batch_by` is not None, we "split" each batch of the dataset by the `split_batch_by` value. Args: dataset: `tf.data.Dataset` that will be used as the input source. input_workers: an `InputWorkers` object. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle the last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by the `split_batch_by` value. input_context: `InputContext` for sharding.
Only pass this in for between-graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. """ super(DistributedDataset, self).__init__(input_workers=input_workers) # We clone and shard the dataset on each worker. The current setup tries to # shard the dataset by files if possible so that each worker sees a # different subset of files. If that is not possible, we will attempt to # shard the final input such that each worker will run the entire # preprocessing pipeline and only receive its own shard of the dataset. if split_batch_by: try: dataset = distribute._RebatchDataset(dataset, split_batch_by) # pylint: disable=protected-access except errors.InvalidArgumentError as e: if "without encountering a batch" in str(e): six.reraise( ValueError, ValueError( "Call the `batch` method on the input Dataset in order to be " "able to split your input across {} replicas.\n Please see " "the tf.distribute.Strategy guide. {}".format( split_batch_by, e)), sys.exc_info()[2]) else: raise self._cloned_datasets = [] if input_context: # Between-graph case, where we rely on the input_context for sharding. assert input_workers.num_workers == 1 dataset = input_ops.auto_shard_dataset( # pylint: disable=protected-access dataset, input_context.num_input_pipelines, input_context.input_pipeline_id) self._cloned_datasets.append(dataset) else: for i, worker in enumerate(input_workers.worker_devices): with ops.device(worker): cloned_dataset = dataset if not context.executing_eagerly(): cloned_dataset = input_ops._clone_dataset(dataset) # pylint: disable=protected-access cloned_dataset = cloned_dataset.with_options(dataset.options()) # TODO(b/129506833): Figure out between-graph cases. cloned_dataset = input_ops.auto_shard_dataset( # pylint: disable=protected-access cloned_dataset, len(input_workers.worker_devices), i) self._cloned_datasets.append(cloned_dataset) self._input_workers = input_workers # TODO(anjalisridhar): Identify if we need to set this property on the # iterator. self._element_structure = dataset._element_structure # pylint: disable=protected-access self._strategy = strategy def __iter__(self): worker_iterators = _create_iterators_per_worker(self._cloned_datasets, self._input_workers) iterator = DistributedIterator(self._input_workers, worker_iterators, self._strategy) iterator._element_structure = self._element_structure # pylint: disable=protected-access return iterator class DistributedDatasetV1(DistributedDataset): """Wrapped tf.data.DatasetV1 that supports prefetching to multiple devices.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): self._input_workers = input_workers super(DistributedDatasetV1, self).__init__( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) def make_one_shot_iterator(self): """Get a one-time-use iterator for DistributedDatasetV1.""" return self._get_iterator() def make_initializable_iterator(self): """Get an initializable iterator for DistributedDatasetV1.""" # Eager mode generates already initialized iterators. Hence we cannot create # an initializable iterator. if context.executing_eagerly(): raise ValueError("Cannot create initializable iterator in Eager mode. 
" "Please use `make_one_shot_iterator` instead.") return self._get_iterator() def _get_iterator(self): worker_iterators = _create_iterators_per_worker(self._cloned_datasets, self._input_workers) iterator = DistributedIteratorV1(self._input_workers, worker_iterators, self._strategy) iterator._element_structure = self._element_structure # pylint: disable=protected-access return iterator # TODO(priyag): Add other replication modes. class DistributedDatasetsFromFunction(_IterableInput): """Inputs created from dataset function.""" def __init__(self, dataset_fn, input_workers, input_contexts, strategy): """Makes an iterable from datasets created by the given function. Args: dataset_fn: A function that returns a `Dataset` given an `InputContext`. input_workers: an `InputWorkers` object. input_contexts: A list of `InputContext` instances to be passed to call(s) to `dataset_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. """ super(DistributedDatasetsFromFunction, self).__init__( input_workers=input_workers) if input_workers.num_workers != len(input_contexts): raise ValueError( "Number of input workers (%d) is not same as number of " "input_contexts (%d)" % (input_workers.num_workers, len(input_contexts))) self._dataset_fn = dataset_fn self._input_workers = input_workers self._input_contexts = input_contexts self._strategy = strategy def __iter__(self): iterators = [] for i, ctx in enumerate(self._input_contexts): worker = self._input_workers.worker_devices[i] with ops.device(worker): dataset = self._dataset_fn(ctx) devices = self._input_workers.compute_devices_for_worker(i) iterator = _SingleWorkerDatasetIterator(dataset, worker, devices) iterators.append(iterator) return DistributedIterator(self._input_workers, iterators, self._strategy) # TODO(anjalisridhar): This class will be soon be removed in favor of newer # APIs. class InputFunctionIterator(DistributedIteratorV1): """Iterator created from input function.""" def __init__(self, input_fn, input_workers, input_contexts, strategy): """Make an iterator for input provided via an input function. Currently implements PER_WORKER mode, in which the `input_fn` is called once on each worker. TODO(priyag): Add other replication modes. Args: input_fn: Input function that returns a `tf.data.Dataset` object. input_workers: an `InputWorkers` object. input_contexts: A list of `InputContext` instances to be passed to call(s) to `input_fn`. Length and order should match worker order in `worker_device_pairs`. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. 
""" assert isinstance(input_workers, InputWorkers) if input_workers.num_workers != len(input_contexts): raise ValueError( "Number of input workers (%d) is not same as number of " "input_contexts (%d)" % (input_workers.num_workers, len(input_contexts))) iterators = [] for i, ctx in enumerate(input_contexts): worker = input_workers.worker_devices[i] with ops.device(worker): result = input_fn(ctx) devices = input_workers.compute_devices_for_worker(i) if isinstance(result, dataset_ops.DatasetV2): iterator = _SingleWorkerDatasetIterator(result, worker, devices) elif callable(result): iterator = _SingleWorkerCallableIterator(result, worker, devices) else: raise ValueError( "input_fn must return a tf.data.Dataset or a callable.") iterators.append(iterator) super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy) # TODO(anjalisridhar): This class will soon be removed and users should move # to using DistributedIterator. class DatasetIterator(DistributedIteratorV1): """Iterator created from input dataset.""" def __init__(self, dataset, input_workers, strategy, split_batch_by=None, input_context=None): """Make an iterator for the dataset on given devices. If `split_batch_by` is not None, we "split" each batch of the dataset by `split_batch_by` value. Args: dataset: `tf.data.Dataset` that will be used as the input source. input_workers: an `InputWorkers` object. strategy: a `tf.distribute.Strategy` object, used to run all-reduce to handle last partial batch. split_batch_by: Optional integer. If present, we "split" each batch of the dataset by `split_batch_by` value. input_context: `InputContext` for sharding. Only pass this in for between graph multi-worker cases where there is only one `input_worker`. In these cases, we will shard based on the `input_pipeline_id` and `num_input_pipelines` in the `InputContext`. """ dist_dataset = DistributedDatasetV1( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) worker_iterators = _create_iterators_per_worker( dist_dataset._cloned_datasets, input_workers) # pylint: disable=protected-access super(DatasetIterator, self).__init__( input_workers, worker_iterators, # pylint: disable=protected-access strategy) self._element_structure = dist_dataset._element_structure # pylint: disable=protected-access def _dummy_tensor_fn(value_structure): """A function to create dummy tensors from `value_structure`.""" def create_dummy_tensor(feature_shape, feature_type): """Create a dummy tensor with possible batch dimensions set to 0.""" # Ideally we should set the batch dimension to 0, however as in # DistributionStrategy we don't know the batch dimension, we try to # guess it as much as possible. If the feature has unknown dimensions, we # will set them to 0. If the feature shape is already static, we guess the # first dimension as batch dimension and set it to 0. dims = [] for dim in feature_shape.dims: if dim.value is None: dims.append(tensor_shape.Dimension(0)) else: dims.append(dim) if feature_shape.is_fully_defined() and dims: dims[0] = tensor_shape.Dimension(0) # Create the dummy tensor. 
dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type) return dummy_tensor result = [] # pylint: disable=protected-access for feature_shape, feature_type in zip(value_structure._flat_shapes, value_structure._flat_types): result.append(create_dummy_tensor(feature_shape, feature_type)) if isinstance(value_structure, structure.NestedStructure): result = nest.pack_sequence_as(value_structure._nested_structure, result) else: result = result[0] # pylint: enable=protected-access return result class _SingleWorkerDatasetIterator(object): """Iterator for a single `tf.data.Dataset`.""" def __init__(self, dataset, worker, devices): """Create an iterator for the `dataset` that fetches data to the worker's `devices`. `MultiDeviceIterator` is used to prefetch input to the devices on the given worker. Args: dataset: A `tf.data.Dataset` instance. worker: Worker on which ops should be created. devices: Distribute data from `dataset` to these devices. """ self._dataset = dataset self._worker = worker self._devices = devices self._make_iterator() def _make_iterator(self): """Make an appropriate iterator on the dataset.""" with ops.device(self._worker): self._iterator = multi_device_iterator_ops.MultiDeviceIterator( self._dataset, self._devices) def get_next(self, device, name=None): """Get the next element for the given device.""" del name with ops.device(self._worker): return self._iterator.get_next(device) def get_next_as_list_deprecated(self, name=None): """Get the next element from the underlying iterator.""" del name with ops.device(self._worker): data_list = self._iterator.get_next() return data_list def get_next_as_list(self, name=None): """Get the next element from the underlying iterator. If there is no data left, a list of dummy tensors with possible batch dimensions set to 0 will be returned. Args: name: not used. Returns: A boolean tensor that indicates whether the next element has any data, and the next element itself (or a list of dummy tensors if there is no data left). """ del name with ops.device(self._worker): data_list = self._iterator.get_next_as_optional() result = [] for i, data in enumerate(data_list): # Place the condition op on the same device as the data so the data # doesn't need to be sent back to the worker. with ops.device(self._devices[i]): # MultiDeviceIterator fetches data in order, so we only need to check # whether the first replica has a value to see whether there is data # left for this single worker. if i == 0: worker_has_value = data.has_value() # pylint: disable=unnecessary-lambda # pylint: disable=cell-var-from-loop real_data = control_flow_ops.cond( data.has_value(), lambda: data.get_value(), lambda: _dummy_tensor_fn(data.value_structure)) result.append(real_data) # pylint: enable=cell-var-from-loop # pylint: enable=unnecessary-lambda return worker_has_value, result def initialize(self): """Initialize the underlying iterator. In eager execution, this simply recreates the underlying iterator. In graph execution, it returns the initializer ops for the underlying iterator. Returns: A list of any initializer ops that should be run.
""" if context.executing_eagerly(): self._iterator._eager_reset() # pylint: disable=protected-access return [] else: return [self._iterator.initializer] @property def output_classes(self): return dataset_ops.get_legacy_output_classes(self._iterator) @property def output_shapes(self): return dataset_ops.get_legacy_output_shapes(self._iterator) @property def output_types(self): return dataset_ops.get_legacy_output_types(self._iterator) class _SingleWorkerCallableIterator(object): """Iterator for a single tensor-returning callable.""" def __init__(self, fn, worker, devices): self._fn = fn self._worker = worker self._devices = devices def get_next(self, device, name=None): """Get next element for the given device from the callable.""" del device, name with ops.device(self._worker): return self._fn() def get_next_as_list_deprecated(self, name=None): """Get next element from the callable.""" del name with ops.device(self._worker): data_list = [self._fn() for _ in self._devices] return data_list def get_next_as_list(self, name=None): """Get next element from the callable.""" del name with ops.device(self._worker): data_list = [self._fn() for _ in self._devices] return constant_op.constant(True), data_list def initialize(self): # TODO(petebu) Should this throw an exception instead? return [] def _create_iterators_per_worker(worker_datasets, input_workers): """Create a multidevice iterator on each of the workers.""" assert isinstance(input_workers, InputWorkers) assert len(worker_datasets) == len(input_workers.worker_devices) iterators = [] for i, worker in enumerate(input_workers.worker_devices): with ops.device(worker): worker_devices = input_workers.compute_devices_for_worker(i) iterator = _SingleWorkerDatasetIterator(worker_datasets[i], worker, worker_devices) iterators.append(iterator) return iterators # TODO(sourabhbajaj): Remove this in lieu of distributed datasets def _get_batched_dataset(d): """Get the batched dataset from `d`.""" # pylint: disable=protected-access if isinstance(d, dataset_ops.DatasetV1Adapter): d = d._dataset if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)): return d elif isinstance(d, (dataset_ops.PrefetchDataset, dataset_ops._OptionsDataset)): return _get_batched_dataset(d._input_dataset) raise ValueError( "Unable to get batched dataset from the input dataset. `batch` " "`map_and_batch` need to be the last operations on the dataset. " "The batch operations can be followed by a prefetch.") def _get_batched_dataset_attributes(d): """Get `batch_size`, `drop_remainder` of dataset.""" # pylint: disable=protected-access assert isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)) if isinstance(d, dataset_ops.BatchDataset): batch_size = d._batch_size drop_remainder = d._drop_remainder elif isinstance(d, batching._MapAndBatchDataset): batch_size = d._batch_size_t drop_remainder = d._drop_remainder_t # pylint: enable=protected-access if tensor_util.is_tensor(batch_size): batch_size = tensor_util.constant_value(batch_size) if tensor_util.is_tensor(drop_remainder): drop_remainder = tensor_util.constant_value(drop_remainder) return batch_size, drop_remainder # TODO(sourabhbajaj): Remove this in lieu of distributed datasets def _get_dataset_attributes(dataset): """Get the underlying attributes from the dataset object.""" # pylint: disable=protected-access # First, get batch_size and drop_remainder from the dataset. We need # to walk back the dataset creation process and find the batched version in # order to get the attributes. 
batched_dataset = _get_batched_dataset(dataset) batch_size, drop_remainder = _get_batched_dataset_attributes(batched_dataset) # Second, the prefetch buffer size should be obtained from the original # dataset. prefetch_buffer = None if isinstance(dataset, dataset_ops.PrefetchDataset): prefetch_buffer = dataset._buffer_size elif (isinstance(dataset, dataset_ops.DatasetV1Adapter) and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)): prefetch_buffer = dataset._dataset._buffer_size return batch_size, drop_remainder, prefetch_buffer class MultiStepContext(object): """A context object that can be used to capture things when running steps. This context object is useful when running multiple steps at a time using the `experimental_run_steps_on_iterator` API. For example, it allows the user's step function to specify which outputs to emit at what frequency. Currently it supports capturing output from the last step, as well as capturing non-tensor outputs. In the future it will be augmented to support other use cases such as outputting every N steps. """ def __init__(self): """Initialize an output context.""" self._last_step_outputs = {} self._last_step_outputs_reduce_ops = {} self._non_tensor_outputs = {} @property def last_step_outputs(self): """A dictionary consisting of outputs to be captured on the last step. Keys in the dictionary are names of tensors to be captured, as specified when `set_last_step_output` is called. Values in the dictionary are the tensors themselves. If `set_last_step_output` was called with a `reduce_op` for this output, then the value is the reduced value. Returns: A dictionary with last step outputs. """ return self._last_step_outputs def _set_last_step_outputs(self, outputs): """Replace the entire dictionary of last step outputs.""" if not isinstance(outputs, dict): raise ValueError("Need a dictionary to set last_step_outputs.") self._last_step_outputs = outputs def set_last_step_output(self, name, output, reduce_op=None): """Set `output` with `name` to be outputted from the last step. Args: name: String, name to identify the output. Doesn't need to match tensor name. output: The tensors that should be outputted with `name`. See below for actual types supported. reduce_op: Reduction method to use to reduce outputs from multiple replicas. Required if `set_last_step_output` is called in a replica context. Optional in cross_replica_context. When present, the outputs from all the replicas are reduced using the current distribution strategy's `reduce` method. Hence, the type of `output` must be what's supported by the corresponding `reduce` method. For example, if using MirroredStrategy and reduction is set, output must be a `PerReplica` value. The reduce method is also recorded in a dictionary `_last_step_outputs_reduce_ops` for later use in determining whether the outputs have already been reduced. """ if distribution_strategy_context.in_cross_replica_context(): self._last_step_outputs_reduce_ops[name] = reduce_op if reduce_op is None: self._last_step_outputs[name] = output else: distribution = distribution_strategy_context.get_strategy() self._last_step_outputs[name] = distribution.reduce(reduce_op, output, axis=None) else: assert reduce_op is not None def merge_fn(distribution, value): self._last_step_outputs[name] = distribution.reduce(reduce_op, value, axis=None) # Setting this inside the `merge_fn` because all replicas share the same # context object, so it's more robust to set it only once (even if all # the replicas are trying to set the same value).
self._last_step_outputs_reduce_ops[name] = reduce_op distribution_strategy_context.get_replica_context().merge_call( merge_fn, args=(output,)) @property def non_tensor_outputs(self): """A dictionary consisting of any non tensor outputs to be captured.""" return self._non_tensor_outputs def set_non_tensor_output(self, name, output): """Set `output` with `name` to be captured as a non tensor output.""" if distribution_strategy_context.in_cross_replica_context(): self._non_tensor_outputs[name] = output else: def merge_fn(distribution, value): # NOTE(priyag): For non tensor outputs, we simply return all the values # in a list as reduction doesn't make sense on non tensors. self._non_tensor_outputs[name] = ( distribution.experimental_local_results(value)) distribution_strategy_context.get_replica_context().merge_call( merge_fn, args=(output,))
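# A usage sketch (illustrative only: the helper function is hypothetical, and
# `strategy` is assumed to be a tf.distribute.Strategy instance). It wires a
# plain dataset through get_distributed_dataset for a single local CPU worker;
# ReplicaDeviceMap comes from the values module, as used elsewhere in this
# package.
def _example_get_distributed_dataset(strategy):
  dataset = dataset_ops.DatasetV2.from_tensor_slices(
      [1., 2., 3., 4.]).batch(2)
  device_map = values.ReplicaDeviceMap(("/device:CPU:0",))
  input_workers = InputWorkers(device_map)
  # The DatasetV2 input selects the DistributedDataset branch above; iterate
  # the result with iter() to obtain a DistributedIterator.
  return get_distributed_dataset(dataset, input_workers, strategy)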
tensorflow-master
tensorflow/python/distribute/input_lib.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for cross_device_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections as pycoll import threading from tensorflow.python.distribute import all_reduce from tensorflow.python.distribute import values as value_lib from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import collective_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nccl_ops def aggregate_gradients_using_nccl(replica_grads): """Aggregate gradients using NCCL all-reduce.""" agg_all_g_and_v = [] for single_g_and_v in zip(*replica_grads): single_grads = [g for g, _ in single_g_and_v] agg_grads = nccl_ops.all_sum(single_grads) agg_all_g_and_v.append( [(g, v) for g, (_, v) in zip(agg_grads, single_g_and_v)]) agg_all_g_and_v = list(zip(*agg_all_g_and_v)) return agg_all_g_and_v def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads): """Aggregate gradients using hierarchical copies. Args: avail_devices: available GPU devices. replica_grads: List of lists of (gradient, variable) tuples. The outer list is over replicas. The inner list is over individual gradients. Returns: The list of (aggregated_gradient, variable), where the gradient has been summed across all replicas and the variable is chosen from the first replica. """ # This only works for a DGX-1-type machine topology. # Device peer-to-peer matrix: # DMA: 0 1 2 3 4 5 6 7 # 0: Y Y Y Y Y N N N # 1: Y Y Y Y N Y N N # 2: Y Y Y Y N N Y N # 3: Y Y Y Y N N N Y # 4: Y N N N Y Y Y Y # 5: N Y N N Y Y Y Y # 6: N N Y N Y Y Y Y # 7: N N N Y Y Y Y Y agg_grads = [] num_devices = len(avail_devices) # In the special case of DGX-1 machine topology, the two groups have equal # size. group_size = num_devices // 2 for i, single_grads in enumerate(zip(*replica_grads)): group_0_main_device = i % num_devices group_1_main_device = (group_0_main_device + group_size) % num_devices if group_0_main_device < group_size: group_0_begin = 0 group_1_begin = group_size else: group_0_begin = group_size group_1_begin = 0 # Aggregate the first group. group_0_device_grads = single_grads[group_0_begin: group_0_begin + group_size] with ops.device(avail_devices[group_0_main_device]): group_0_agg_grads, _ = aggregate_single_gradient_using_copy( group_0_device_grads, False, False) # Aggregate the second group.
group_1_device_grads = single_grads[group_1_begin: group_1_begin + group_size] with ops.device(avail_devices[group_1_main_device]): group_1_agg_grads, _ = aggregate_single_gradient_using_copy( group_1_device_grads, False, False) # Aggregate between the groups. with ops.device(avail_devices[group_0_main_device]): (agg_total_grads, _), _ = aggregate_single_gradient_using_copy( [group_0_agg_grads, group_1_agg_grads], False, False) # Broadcast the result back to the root of each group. with ops.device(avail_devices[group_0_main_device]): group_0_agg_grads_bcast = array_ops.identity(agg_total_grads) with ops.device(avail_devices[group_1_main_device]): group_1_agg_grads_bcast = array_ops.identity(agg_total_grads) agg_grads_bcast = [] for j in range(len(single_grads)): with ops.device(avail_devices[j]): # Broadcast the result back to each member in the group from the root. if (group_0_main_device < group_size) == (j < group_size): src_device_grad = group_0_agg_grads_bcast else: src_device_grad = group_1_agg_grads_bcast agg_grads_bcast.append(array_ops.identity(src_device_grad)) agg_grads.append( [(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)]) agg_grads = list(zip(*agg_grads)) return agg_grads def aggregate_single_gradient_using_copy(grad_and_vars, use_mean, check_inf_nan): """Calculate the average gradient for a shared variable across all replicas. Note that this function provides a synchronization point across all replicas. Args: grad_and_vars: A list or tuple of (gradient, variable) tuples. Each (gradient, variable) pair within the outer list represents the gradient of the variable calculated for a single replica, and the number of pairs equals the number of replicas. use_mean: if True, the mean is taken; otherwise the sum of the gradients is taken. check_inf_nan: check grads for NaNs and Infs. Returns: The tuple ([(average_gradient, variable),], has_nan_or_inf) where the gradient has been averaged across all replicas. The variable is chosen from the first replica. has_nan_or_inf indicates whether the grads have any NaN or Inf values. """ grads = [g for g, _ in grad_and_vars] grad = math_ops.add_n(grads) if use_mean and len(grads) > 1: grad = math_ops.multiply(grad, 1.0 / len(grads)) v = grad_and_vars[0][1] if check_inf_nan: has_nan_or_inf = math_ops.logical_not( math_ops.reduce_all(math_ops.is_finite(grads))) return (grad, v), has_nan_or_inf else: return (grad, v), None def group_device_names(devices, group_size): """Group device names into groups of group_size. Args: devices: a list of canonical device strings. group_size: integer which is equal to or greater than 1. Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size == 0 then each device will appear exactly once. Raises: ValueError: if group_size > len(devices) """ num_devices = len(devices) if group_size > num_devices: raise ValueError( 'only %d devices, but group_size=%d' % (num_devices, group_size)) num_groups = ( num_devices // group_size + (1 if (num_devices % group_size != 0) else 0)) groups = [[] for _ in range(num_groups)] for i in range(num_groups * group_size): groups[i % num_groups].append(devices[i % num_devices]) return groups def split_grads_by_size(threshold_size, device_grads): """Break gradients into two sets according to tensor size. Args: threshold_size: int size cutoff for small vs large tensor. device_grads: List of lists of (gradient, variable) tuples. The outer list is over devices.
The inner list is over individual gradients. Returns: small_grads: Subset of device_grads where shape is <= threshold_size elements. large_grads: Subset of device_grads where shape is > threshold_size elements. """ small_grads = [] large_grads = [] for dl in device_grads: small_dl = [] large_dl = [] for (g, v) in dl: tensor_size = g.get_shape().num_elements() if tensor_size <= threshold_size: small_dl.append([g, v]) else: large_dl.append([g, v]) if small_dl: small_grads.append(small_dl) if large_dl: large_grads.append(large_dl) return small_grads, large_grads # threading.Lock() and threading.local() cannot be pickled and therefore cannot # be a field of CollectiveKeys. Right now _thread_local does not need to be an # instance member of CollectiveKeys since we always create a new thread for # each replica. _lock = threading.Lock() _thread_local = threading.local() # TODO(yuefengz): use random key starts to avoid reusing keys? class CollectiveKeys(object): """Class that manages collective keys. We need to manage three different keys for collective: *Group key*: an integer key to identify the set of cooperative devices. Collective ops that run on the same set of devices must use the same group key. *Instance key*: an integer key to identify the set of corresponding tensors on different devices in a device group that need to be all-reduced. *Graph key*: an integer key that is unique per graph. This is used to support multiple graphs per client session. It must be non-zero and set in the `config` argument of each call to `session.run`. """ def __init__(self, group_key_start=1, op_instance_key_start=100, variable_instance_key_start=1000000): """Initializes the object. Args: group_key_start: the starting integer of group key. op_instance_key_start: the starting integer of instance key for ops. variable_instance_key_start: the starting integer of instance key for variables. """ self._group_key = group_key_start self._group_key_table = {} assert op_instance_key_start != variable_instance_key_start self._op_instance_key_start = op_instance_key_start self._variable_instance_key = variable_instance_key_start def _get_thread_local_object(self): # We make the op instance key thread-local so that it will work with # MirroredStrategy and the distribute coordinator. if not hasattr(_thread_local, 'op_instance_key'): _thread_local.op_instance_key = self._op_instance_key_start return _thread_local def get_group_key(self, devices): """Returns a group key for the set of devices. Args: devices: list of strings naming devices in a collective group. Returns: int key uniquely identifying the set of device names. """ parsed = [pydev.DeviceSpec.from_string(d) for d in devices] # In between-graph replicated training, different workers need to get # the same device key. So we remove the task_type and task_id from the # devices. # TODO(yuefengz): in in-graph replicated training, we need to include # task_type and task_id.
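    # Illustrative example (added note, not in the original source): the
    # worker devices '/job:worker/task:0/device:GPU:0' and
    # '/job:worker/task:1/device:GPU:0' both reduce to 'GPU:0' below, so two
    # single-GPU workers in between-graph training derive the same key_id and
    # are therefore assigned the same group key.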
names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed]) key_id = ','.join(names) with _lock: if key_id not in self._group_key_table: new_key = self._group_key self._group_key += 1 self._group_key_table[key_id] = new_key return self._group_key_table[key_id] def get_op_instance_key(self): """Returns a new instance key for use in defining a collective op.""" v = self._get_thread_local_object().op_instance_key self._get_thread_local_object().op_instance_key += 1 return v def get_variable_instance_key(self): """Returns a new instance key for use in creating a Variable.""" v = self._variable_instance_key self._variable_instance_key += 1 return v def build_collective_reduce(input_tensors, num_workers, collective_keys, reduction_op='Add', unary_op='Id'): """Build a subgraph that does one full all-reduce, using the collective Op. Args: input_tensors: tensors within a single worker graph that are to be reduced together; must be one per device. num_workers: total number of workers with identical independent graphs that will be doing this same reduction. The reduction will actually include the corresponding tensors at all these workers. collective_keys: a CollectiveKeys object. reduction_op: string naming the reduction op. unary_op: string naming the unary final op. Returns: An array of final tensors, one per device, computed by the full reduction. If the total group size (`len(input_tensors) * num_workers`) is less than two, the input tensors are returned unchanged. """ group_size = len(input_tensors) * num_workers if group_size < 2: return input_tensors devices = [t.device for t in input_tensors] num_devices = len(devices) group_key = collective_keys.get_group_key(devices) instance_key = collective_keys.get_op_instance_key() subdiv_offsets = [0] # TODO(tucker): maybe support non-default subdiv spec def collective_all_reduce(): """Call collective allreduce.""" assert not context.executing_eagerly() out_tensors = [] for d in range(num_devices): with ops.device(devices[d]): reduce_op = collective_ops.all_reduce( input_tensors[d], group_size, group_key, instance_key, reduction_op, unary_op, subdiv_offsets) out_tensors.append(reduce_op) return out_tensors if context.executing_eagerly(): # Collective ops will block unless they are executed concurrently such as in # a graph or a defun. collective_all_reduce = def_function.function(collective_all_reduce) return collective_all_reduce() def build_collective_gather(input_tensors, num_workers, collective_keys): """Build a subgraph that does one full all-gather, using the collective Op. Args: input_tensors: tensors within a single worker graph that are to be gathered together; must be one per device. num_workers: total number of workers with identical independent graphs that will be doing this same gather. The gather will actually include the corresponding tensors at all these workers. collective_keys: a CollectiveKeys object. Returns: An array of final tensors, one per device, computed by the full gather. If the total group size is less than two, the input tensors are returned unchanged.
""" group_size = len(input_tensors) * num_workers if group_size < 2: return input_tensors devices = [t.device for t in input_tensors] num_devices = len(devices) group_key = collective_keys.get_group_key(devices) instance_key = collective_keys.get_op_instance_key() def collective_all_gather(): """Call collective allgather.""" assert not context.executing_eagerly() out_tensors = [] for d in range(num_devices): with ops.device(devices[d]): gather_op = collective_ops.all_gather(input_tensors[d], group_size, group_key, instance_key) out_tensors.append(gather_op) return out_tensors if context.executing_eagerly(): # Collective ops will block unless they are executed concurrently such as in # a graph or a defun. collective_all_gather = def_function.function(collective_all_gather) return collective_all_gather() def sum_grad_and_var_all_reduce(grad_and_vars, num_workers, alg, gpu_indices, aux_devices=None, num_shards=1): """Apply all-reduce algorithm over specified gradient tensors.""" with ops.name_scope('allreduce'): # Note that each grad_and_vars looks like the following: # ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN)) scaled_grads = [g for g, _ in grad_and_vars] if alg == 'nccl': summed_grads = nccl_ops.all_sum(scaled_grads) elif alg == 'xring': summed_grads = all_reduce.build_ring_all_reduce( scaled_grads, num_workers, num_shards, gpu_indices, math_ops.add) elif alg == 'nccl/xring': summed_grads = all_reduce.build_nccl_then_ring(scaled_grads, num_shards, math_ops.add) elif alg == 'nccl/rechd': summed_grads = all_reduce.build_nccl_then_recursive_hd( scaled_grads, math_ops.add) elif alg == 'nccl/pscpu': summed_grads = all_reduce.build_nccl_then_shuffle( scaled_grads, aux_devices, math_ops.add, math_ops.add_n) elif alg == 'pscpu/pscpu': second_gather_devices = aux_devices[:num_shards] summed_grads = all_reduce.build_shuffle_then_shuffle( scaled_grads, aux_devices, second_gather_devices, math_ops.add_n) elif alg in ['pscpu', 'psgpu']: summed_grads = all_reduce.build_shuffle_all_reduce( scaled_grads, aux_devices, math_ops.add_n) else: raise ValueError('unsupported all_reduce alg: ', alg) result = [] for (_, v), g in zip(grad_and_vars, summed_grads): result.append([g, v]) return result def sum_gradients_all_reduce(dev_prefixes, replica_grads, num_workers, alg, num_shards, gpu_indices): """Apply all-reduce algorithm over specified gradient tensors. Args: dev_prefixes: list of prefix strings to use to generate PS device names. replica_grads: the gradients to reduce. num_workers: number of worker processes across entire job. alg: the all-reduce algorithm to apply. num_shards: alg-specific sharding factor. gpu_indices: indices of local GPUs in order usable for ring-reduce. Returns: list of reduced tensors """ alg_contains_shuffle = any(n in alg for n in ['pscpu', 'psgpu']) is_hierarchical = '/' in alg if 'pscpu' in alg: aux_devices = [prefix + '/cpu:0' for prefix in dev_prefixes] elif 'psgpu' in alg: aux_devices = [ prefix + '/gpu:%d' % i for i in range(len(gpu_indices)) for prefix in dev_prefixes ] else: aux_devices = ['/job:localhost/cpu:0'] # Auxiliary devices for hierarchical all-reduces. 
aux_device_groups = group_device_names( aux_devices, num_shards if alg_contains_shuffle else 1) group_index = 0 reduced_gv_list = [] for grad_and_vars in zip(*replica_grads): reduced_gv_list.append( sum_grad_and_var_all_reduce( grad_and_vars, num_workers, alg, gpu_indices, aux_devices if is_hierarchical else aux_device_groups[group_index], num_shards)) group_index = (group_index + 1) % len(aux_device_groups) new_replica_grads = [list(x) for x in zip(*reduced_gv_list)] return new_replica_grads def extract_ranges(index_list, range_size_limit=32): """Extract consecutive ranges and singles from index_list. Args: index_list: List of monotone increasing non-negative integers. range_size_limit: Largest size range to return. If a larger consecutive range exists, it will be returned as multiple ranges. Returns: (ranges, singles) where ranges is a list of [first, last] pairs of consecutive elements in index_list, and singles is all of the other elements, in original order. """ if not index_list: return [], [] first = index_list[0] last = first ranges = [] singles = [] for i in index_list[1:]: if i == last + 1 and (last - first) <= range_size_limit: last = i else: if last > first: ranges.append([first, last]) else: singles.append(first) first = i last = i if last > first: ranges.append([first, last]) else: singles.append(first) return ranges, singles GradPackTuple = pycoll.namedtuple('GradPackTuple', 'indices vars shapes') def pack_range(key, packing, grad_vars, rng): """Form the concatenation of a specified range of gradient tensors. Args: key: Value under which to store meta-data in packing that will be used later to restore the grad_var list structure. packing: Dict holding data describing packed ranges of small tensors. grad_vars: List of (grad, var) pairs for one replica. rng: A pair of integers giving the first, last indices of a consecutive range of tensors to be packed. Returns: A tensor that is the concatenation of all the specified small tensors. """ to_pack = grad_vars[rng[0]:rng[1] + 1] members = [] variables = [] restore_shapes = [] with ops.name_scope('pack'): for g, v in to_pack: variables.append(v) restore_shapes.append(g.shape) with ops.device(g.device): members.append(array_ops.reshape(g, [-1])) packing[key] = GradPackTuple( indices=range(rng[0], rng[1] + 1), vars=variables, shapes=restore_shapes) with ops.device(members[0].device): return array_ops.concat(members, 0) def unpack_grad_tuple(gv, gpt): """Unpack a previously packed collection of gradient tensors. Args: gv: A (grad, var) pair to be unpacked. gpt: A GradPackTuple describing the packing operation that produced gv. Returns: A list of (grad, var) pairs corresponding to the values that were originally packed into gv, maybe following subsequent operations like reduction. """ elt_widths = [x.num_elements() for x in gpt.shapes] with ops.device(gv[0][0].device): with ops.name_scope('unpack'): splits = array_ops.split(gv[0], elt_widths) unpacked_gv = [] for idx, s in enumerate(splits): unpacked_gv.append((array_ops.reshape(s, gpt.shapes[idx]), gpt.vars[idx])) return unpacked_gv def pack_small_tensors(replica_grads, max_bytes=0, max_group=0): """Concatenate small gradient tensors together for reduction. Args: replica_grads: List of lists of (gradient, variable) tuples. max_bytes: Int giving max number of bytes in a tensor that may be considered small. max_group: Int giving max number of small tensors that may be concatenated into one new tensor. 
Returns: new_replica_grads, packing where new_replica_grads is identical to replica_grads except that all feasible small_tensors have been removed from their places and concatenated into larger tensors that are now in the front of the list for each replica, and packing contains the data necessary to restore the replica_grads structure. Look through the first replica for gradients of the same type (float), and small size, that are all sequential. For each such group, replace by a new tensor that is a flattened concatenation. Note that the corresponding variable will be absent, which doesn't matter because it isn't used during all-reduce. Requires: Every gv_list in replicas must have isomorphic structure including identical tensor sizes and types. """ small_indices = [] large_indices = [] for idx, (g, _) in enumerate(replica_grads[0]): if g.dtype == dtypes.float32 and (4 * g.shape.num_elements()) <= max_bytes: small_indices.append(idx) else: large_indices.append(idx) small_ranges, small_singles = extract_ranges( small_indices, range_size_limit=max_group) large_indices = sorted(large_indices + small_singles) num_gv = len(replica_grads[0]) packing = {} if small_ranges: new_replica_grads = [] for dev_idx, gv_list in enumerate(replica_grads): assert len(gv_list) == num_gv new_gv_list = [] for r in small_ranges: key = '%d:%d' % (dev_idx, len(new_gv_list)) new_gv_list.append((pack_range(key, packing, gv_list, r), 'packing_var_placeholder')) for i in large_indices: new_gv_list.append(gv_list[i]) new_replica_grads.append(new_gv_list) return new_replica_grads, packing else: return replica_grads, None def unpack_small_tensors(replica_grads, packing): """Undo the structure alterations to replica_grads done by pack_small_tensors. Args: replica_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to replica_grads. Returns: new_replica_grads: identical to replica_grads except that concatenations of small tensors have been split apart and returned to their original positions, paired with their original variables. 
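  For example (an illustrative note): if `packing` records that gradients 0
  and 1 of each replica were concatenated into a single tensor, that packed
  tensor is split back into two pieces which are re-inserted at positions 0
  and 1 of each replica's gradient list.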
""" if not packing: return replica_grads new_replica_grads = [] num_devices = len(replica_grads) num_packed = len(packing.keys()) // num_devices for dev_idx, gv_list in enumerate(replica_grads): gv_list = list(gv_list) new_gv_list = gv_list[num_packed:] for i in range(num_packed): k = '%d:%d' % (dev_idx, i) gpt = packing[k] gv = unpack_grad_tuple(gv_list[i], gpt) for gi, idx in enumerate(gpt.indices): assert idx == gpt.indices[gi] new_gv_list.insert(idx, gv[gi]) new_replica_grads.append(new_gv_list) return new_replica_grads def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n): """Aggregate tensors using `accumulation_fn` and IndexedSlices via concat.""" if any(isinstance(v, ops.IndexedSlices) for v in values): return backprop.aggregate_indexed_slices_gradients(values) else: return accumulation_fn(values) def divide_by_n_tensors_or_indexed_slices(value, n): if isinstance(value, ops.IndexedSlices): value = backprop.flatten_nested_indexed_slices(value) return ops.IndexedSlices( value.values / n, value.indices, value.dense_shape) else: return value / n def copy_tensor_or_indexed_slices_to_device(value, device): with ops.device(device): if isinstance(value, ops.IndexedSlices): copied_values = array_ops.identity(value.values) copied_indices = array_ops.identity(value.indices) copied_shape = array_ops.identity(value.dense_shape) result = ops.IndexedSlices(copied_values, copied_indices, copied_shape) else: result = array_ops.identity(value) return result def contains_indexed_slices(value): """Check whether the value is `IndexedSlices` or contains `IndexedSlices`.""" if isinstance(value, ops.IndexedSlices): return True elif isinstance(value, (list, tuple)) and value: return any(contains_indexed_slices(v) for v in value) elif isinstance(value, value_lib.DistributedValues): return contains_indexed_slices(value.values) else: return False def is_indexed_slices(value): if isinstance(value, ops.IndexedSlices): return True assert isinstance(value, value_lib.DistributedValues) return all([isinstance(v, ops.IndexedSlices) for v in value.values]) def split_by_sparsity(values): """Split values into dense and sparse values. Args: values: a list of tensors or `PerReplica`s. Returns: Four lists: a list of dense values, a list of their indices in `values` and a list of sparse values, a list of their indices in `values`. """ dense_values = [] dense_indices = [] sparse_values = [] sparse_indices = [] for i, v in enumerate(values): if is_indexed_slices(v): sparse_values.append(v) sparse_indices.append(i) else: dense_values.append(v) dense_indices.append(i) return dense_values, dense_indices, sparse_values, sparse_indices def stitch_values(values_and_indices_list): """Stitch values together according to their indices. Args: values_and_indices_list: a list of tuples of values and indices indicating the values and postions in the returned list. Returns: a stitched list of values. """ length = 0 for values_and_indices in values_and_indices_list: length += len(values_and_indices[0]) result = [None] * length for values_and_indices in values_and_indices_list: if values_and_indices and values_and_indices[0]: for v, i in zip(*values_and_indices): assert result[i] is None result[i] = v return result
tensorflow-master
tensorflow/python/distribute/cross_device_utils.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Various classes representing distributed values.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import weakref import six from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import reduce_util from tensorflow.python.eager import context from tensorflow.python.eager import tape from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.framework import type_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops import variables as variables_lib from tensorflow.python.training import saver from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest def _devices_match(d1, d2): return device_util.canonicalize(d1) == device_util.canonicalize(d2) class DeviceMap(object): """A mapping of replicas & logical device ids to devices.""" @property def all_devices(self): """Returns a tuple of strings with all devices in this DeviceMap.""" raise NotImplementedError("Required for DeviceMap implementations.") @property def devices_by_replica(self): """Returns a tuple `t` where `t[replica]` is the devices for `replica`.""" raise NotImplementedError("Required for DeviceMap implementations.") @property def num_logical_devices(self): """Count of the number of devices each replica may be defined across.""" raise NotImplementedError("Required for DeviceMap implementations.") @property def num_replicas_in_graph(self): """Number of replicas defined in this graph.""" raise NotImplementedError("Required for DeviceMap implementations.") def logical_device_from_values(self, values): """Returns the logical device index `values` is on.""" raise NotImplementedError("Required for DeviceMap implementations.") def logical_to_actual_devices(self, logical_device_id): """Returns sequence of `num_replicas_in_graph` devices.""" raise NotImplementedError("Required for DeviceMap implementations.") def select_for_current_replica(self, values, replica_context): """Select the element of `values` for the current replica.""" raise NotImplementedError("Required for DeviceMap implementations.") def replica_for_device(self, device): """Return the replica id containing `device`.""" raise NotImplementedError("Required for DeviceMap implementations.") def select_for_device(self, values, device): """Select the element of 
`values` to access from `device`.""" raise NotImplementedError("Required for DeviceMap implementations.") def is_device_in_replica(self, device, replica_id): """Returns whether `device` is a member of replica `replica_id`.""" raise NotImplementedError("Required for DeviceMap implementations.") class SingleDeviceMap(DeviceMap): """A device map for 1 non-computation device. Use `SingleDeviceMap` when the device does not correspond to some replica of the computation. For computation devices, use `ReplicaDeviceMap` below (even if there is only a single device in the map). """ def __init__(self, device): """Initialize a `SingleDeviceMap`. Args: device: A string device. """ assert isinstance(device, six.string_types) self._device = device_util.canonicalize(device) self._devices = (self._device,) @property def all_devices(self): return self._devices @property def devices_by_replica(self): raise ValueError("SingleDeviceMap not indexed by replicas") @property def num_logical_devices(self): return 1 @property def num_replicas_in_graph(self): return 1 def logical_device_from_values(self, values): del values return 0 def logical_to_actual_devices(self, logical_device_id): assert logical_device_id == 0 return self._devices def select_for_current_replica(self, values, replica_context): assert len(values) == 1 del replica_context return values[0] def replica_for_device(self, device): raise ValueError("SingleDeviceMap not indexed by replicas") def select_for_device(self, values, device): assert len(values) == 1 if self._device != device: raise ValueError("Device %s not found in %s (current device %s)" % (device, self._devices, device_util.current())) return values[0] def is_device_in_replica(self, device, replica_id): raise ValueError("SingleDeviceMap not indexed by replicas") def __repr__(self): return "%s(%r)" % (self.__class__.__name__, self._device) class ReplicaDeviceMap(DeviceMap): """A device map for 1 device per replica.""" def __init__(self, devices): """Initialize a `ReplicaDeviceMap`. Args: devices: `devices[i]` is the string device for replica `i`. 
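      For example (an illustrative note), `ReplicaDeviceMap(("/gpu:0", "/gpu:1"))` assigns replica 0 to "/gpu:0" and replica 1 to "/gpu:1".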
""" self._devices = tuple(device_util.canonicalize(d) for d in devices) if len(set(self._devices)) != len(self._devices): raise ValueError("Duplicate devices in %s, after canonicalization: %s" % (devices, self._devices)) self._device_to_replica = {d: r for r, d in enumerate(self._devices)} @property def all_devices(self): return self._devices @property def devices_by_replica(self): return ((d,) for d in self._devices) @property def num_logical_devices(self): return 1 @property def num_replicas_in_graph(self): return len(self._devices) def logical_device_from_values(self, values): del values return 0 def logical_to_actual_devices(self, logical_device_id): assert logical_device_id == 0 return self._devices def select_for_current_replica(self, values, replica_context): assert len(values) == len(self._devices) replica_id = replica_context.replica_id_in_sync_group if not isinstance(replica_id, int): replica_id = tensor_util.constant_value(replica_id) if replica_id is None: replica_id = 0 return values[replica_id] def replica_for_device(self, device): return self._device_to_replica.get(device) def select_for_device(self, values, device): assert len(values) == len(self._devices) replica_id = self._device_to_replica.get(device) if replica_id is None: raise ValueError("Device %s not found in %s (current device %s)" % (device, self._devices, device_util.current())) return values[replica_id] def is_device_in_replica(self, device, replica_id): return _devices_match(device, self._devices[replica_id]) def __str__(self): return "[%s]" % (", ".join(self._devices)) def __repr__(self): return "%s([%s])" % (self.__class__.__name__, ", ".join(repr(d) for d in self._devices)) LogicalDeviceSpec = collections.namedtuple( "LogicalDeviceSpec", ("device_map", "logical_device")) class WorkerDeviceMap(DeviceMap): """A device map for one value per worker.""" def __init__(self, devices, num_replicas_per_worker): """Initialize a `WorkerDeviceMap`. Args: devices: `devices[i]` is the string device for worker `i` in in-graph relication case; devices is single-element list for its corresponding worker in between-graph case. num_replicas_per_worker: number of replicas per worker, useful in in-graph replication case. """ self._devices = tuple(device_util.canonicalize(d) for d in devices) if len(set(self._devices)) != len(self._devices): raise ValueError("Duplicate devices in %s, after canonicalization: %s" % (devices, self._devices)) self._num_replicas_per_worker = num_replicas_per_worker @property def all_devices(self): return self._devices @property def devices_by_replica(self): raise ValueError("`WorkerDeviceMap` is not indexed by replicas") @property def num_logical_devices(self): return 1 @property def num_replicas_in_graph(self): return len(self._devices) def logical_device_from_values(self, values): del values return 0 def logical_to_actual_devices(self, logical_device_id): assert logical_device_id == 0 return self._devices def select_for_current_replica(self, values, replica_context): return values[replica_context.replica_id_in_sync_group // self._num_replicas_per_worker] def replica_for_device(self, device): raise ValueError("`WorkerDeviceMap` not indexed by replicas") def select_for_device(self, values, device): # TODO(yuefengz): this should map from any device to the value on its # corresponding worker. 
return values[self._devices.index(device_util.canonicalize(device))] def is_device_in_replica(self, device, replica_id): raise ValueError("WorkerDeviceMap not indexed by replicas") def __repr__(self): return "%s(%r, num_replicas_per_worker=%d)" % ( self.__class__.__name__, self._devices, self._num_replicas_per_worker) class DistributedValues(object): """Holds a map from device to values. Either PerReplica or Mirrored.""" def __init__(self, device_map, values, logical_device=None): assert isinstance(device_map, DeviceMap) self._device_map = device_map self._values = tuple(values) if logical_device is None: logical_device = device_map.logical_device_from_values(self._values) self._logical_device = logical_device # TODO(josh11b): Split this into two functions, one with device, one without. def get(self, device=None): """Returns the value for the current device or raises a ValueError.""" if device is None: replica_context = distribution_strategy_context.get_replica_context() if replica_context: return self._device_map.select_for_current_replica( self._values, replica_context) else: device = distribute_lib.get_update_device() if device is None: return self._get_cross_replica() device = device_util.canonicalize(device) return self._device_map.select_for_device(self._values, device) @property def primary(self): """Returns a representative component.""" return self._values[0] @property def devices(self): return self._device_map.logical_to_actual_devices(self._logical_device) @property def logical_device(self): return self._logical_device @property def device_map(self): return self._device_map # TODO(josh11b): Replace experimental_local_results with this? @property def values(self): return self._values @property def is_tensor_like(self): return all(tensor_util.is_tensor(v) for v in self._values) def __str__(self): devices = self.devices assert len(self._values) == len(devices) debug_str = ",\n".join(" %d %s: %s" % (i, devices[i], self._values[i]) for i in range(len(devices))) return "%s:{\n%s\n}" % (self.__class__.__name__, debug_str) def __repr__(self): devices = self.devices assert len(self._values) == len(devices) debug_repr = ",\n".join(" %d %s: %r" % (i, devices[i], self._values[i]) for i in range(len(devices))) return "%s:{\n%s\n}" % (self.__class__.__name__, debug_repr) # NOTE(josh11b,apassos): It would be great if we could inspect the values this was # initialized with and use that to generate the overloaded operators here. # Unfortunately, Python's rules for special methods don't allow this, see # https://docs.python.org/3/reference/datamodel.html#special-method-names # "if a class defines a method named __getitem__(), and x is an instance of # this class, then x[i] is roughly equivalent to type(x).__getitem__(x, i)." # In particular, these special methods don't go through __getattr__, and # Python will only use those methods if they are defined on the class, not the # object. class DistributedDelegate(DistributedValues): """A map from device to values; acts as the same type as the values.""" def __getattr__(self, name): # The '_use_resource_variables' attr and the attrs starting with '_self' # are used for restoring the saved_model proto. At the point these attrs # are queried, the variable has not been initialized, so we should not # query those of the underlying components.
if name.startswith("_self_") or name == "_use_resource_variables": return super(DistributedDelegate, self).__getattr__(name) # TODO(priyag): This needs to be made robust against pitfalls from mix use # __getattr__ and @property. See b/120402273. return getattr(self.get(), name) def _get_as_operand(self): """Returns the value for operations for the current device. Some implementations, e.g. `TPUMirroredVariable`, are not able to return the value type within a replica context. They can, however, return a value that can be used by the operations below. """ return self.get() # pylint: disable=multiple-statements def __add__(self, o): return self._get_as_operand() + o def __radd__(self, o): return o + self._get_as_operand() def __sub__(self, o): return self._get_as_operand() - o def __rsub__(self, o): return o - self._get_as_operand() def __mul__(self, o): return self._get_as_operand() * o def __rmul__(self, o): return o * self._get_as_operand() def __truediv__(self, o): return self._get_as_operand() / o def __rtruediv__(self, o): return o / self._get_as_operand() def __floordiv__(self, o): return self._get_as_operand() // o def __rfloordiv__(self, o): return o // self._get_as_operand() def __mod__(self, o): return self._get_as_operand() % o def __rmod__(self, o): return o % self._get_as_operand() def __lt__(self, o): return self._get_as_operand() < o def __le__(self, o): return self._get_as_operand() <= o def __gt__(self, o): return self._get_as_operand() > o def __ge__(self, o): return self._get_as_operand() >= o def __and__(self, o): return self._get_as_operand() & o def __rand__(self, o): return o & self._get_as_operand() def __or__(self, o): return self._get_as_operand() | o def __ror__(self, o): return o | self._get_as_operand() def __xor__(self, o): return self._get_as_operand() ^ o def __rxor__(self, o): return o ^ self._get_as_operand() def __getitem__(self, o): return self._get_as_operand()[o] def __pow__(self, o, modulo=None): return pow(self._get_as_operand(), o, modulo) def __rpow__(self, o): return pow(o, self._get_as_operand()) def __invert__(self): return ~self._get_as_operand() def __neg__(self): return -self._get_as_operand() def __abs__(self): return abs(self._get_as_operand()) def __div__(self, o): try: return self._get_as_operand().__div__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rdiv__(self, o): try: return self._get_as_operand().__rdiv__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __matmul__(self, o): try: return self._get_as_operand().__matmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rmatmul__(self, o): try: return self._get_as_operand().__rmatmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented # TODO(josh11b): Even more operator overloads. 
class PerReplica(DistributedValues, composite_tensor.CompositeTensor): """Holds a map from device to unsynchronized values.""" @property def _type_spec(self): value_specs = [type_spec.type_spec_from_value(v) for v in self._values] return PerReplicaSpec(value_specs, self._device_map, self._logical_device) class PerReplicaSpec(type_spec.TypeSpec): """Type specification for a `PerReplica`.""" __slots__ = ["_value_specs", "_device_map", "_logical_device"] value_type = property(lambda self: PerReplica) def __init__(self, value_specs, device_map, logical_device): if isinstance(device_map, tuple): device_map = self._deserialize_device_map(device_map) self._value_specs = tuple(value_specs) self._device_map = device_map self._logical_device = logical_device def _serialize(self): device_map = self._serialize_device_map(self._device_map) return (self._value_specs, device_map, self._logical_device) @property def _component_specs(self): return self._value_specs def _to_components(self, value): replica_context = distribution_strategy_context.get_replica_context() if replica_context is not None and replica_context.num_replicas_in_sync > 1: raise ValueError( "Flattening a PerReplica to components is not supported in replica " "context.") return value._values # pylint: disable=protected-access def _from_components(self, tensor_list): return PerReplica(self._device_map, tensor_list, logical_device=self._logical_device) @staticmethod def _serialize_device_map(device_map): if isinstance(device_map, SingleDeviceMap): return ("single", device_map.all_devices[0]) elif isinstance(device_map, ReplicaDeviceMap): return ("replica", device_map.all_devices) elif isinstance(device_map, WorkerDeviceMap): return ("worker", device_map.all_devices, device_map.num_replicas_per_worker) else: raise ValueError("PerReplicaSpec does not support device_map type %s" % type(device_map).__name__) @staticmethod def _deserialize_device_map(device_map_info): device_map_type = device_map_info[0] device_map_args = device_map_info[1:] if device_map_type == "single": return SingleDeviceMap(*device_map_args) elif device_map_type == "replica": return ReplicaDeviceMap(*device_map_args) elif device_map_type == "worker": return WorkerDeviceMap(*device_map_args) else: raise ValueError("Unexpected value in state tuple") # Note that unlike PerReplica, Mirrored values inherit from # DistributedDelegate and so can be used directly in cross-replica mode. # TODO(tomhennigan) Should this extend CompositeTensor? 
class Mirrored(DistributedDelegate): """Holds a map from device to values which are kept in sync.""" def _get_cross_replica(self): device = device_util.canonicalize(device_util.current()) replica_id = self._device_map.replica_for_device(device) if replica_id is None: return self.primary return self._values[replica_id] def _as_graph_element(self): obj = self.get() conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return obj def _assign_on_device(device, variable, tensor): with ops.device(device): return variable.assign(array_ops.identity(tensor)) def _assign_add_on_device(device, variable, tensor): with ops.device(device): return variable.assign_add(array_ops.identity(tensor)) def _assign_sub_on_device(device, variable, tensor): with ops.device(device): return variable.assign_sub(array_ops.identity(tensor)) def _assert_strategy(strategy): if not distribution_strategy_context.has_strategy(): raise RuntimeError( 'Need to be inside "with strategy.scope()" for %s' % (strategy,)) current_strategy = distribution_strategy_context.get_strategy() if current_strategy is not strategy: raise RuntimeError( "Mixing different tf.distribute.Strategy objects: %s is not %s" % (current_strategy, strategy)) @contextlib.contextmanager def _enter_or_assert_strategy(strategy): if not distribution_strategy_context.has_strategy(): with strategy.scope(): yield else: _assert_strategy(strategy) yield DistributedVarOp = collections.namedtuple( "DistributedVarOp", ["name", "graph", "type"]) class DistributedVariable(DistributedDelegate, variables_lib.AbstractVariable): """Holds a map from device to variables.""" # TODO(josh11b): Support changing the set of variables if e.g. if new # devices are joining or a device is to leave. def __init__(self, strategy, device_map, values, logical_device=None): self._distribute_strategy = strategy super(DistributedVariable, self).__init__( device_map, values, logical_device=logical_device) self._common_name = self.primary.name.split(":")[0] # Use a weakref to make it easy to map from the contained values # to the container without introducing a reference cycle. for v in values: v._distributed_container = weakref.ref(self) # pylint: disable=protected-access # tf.keras keeps track of variables initialized using this attribute. When # tf.keras gets the default session, it initializes all uninitialized vars. # We need to make _keras_initialized a member of DistributedVariable because # without this it will use `__getattr__` which will delegate to a component # variable. self._keras_initialized = False # Typically, a `DistributedVariable`'s initializer is composed of the # initializers of the components variables. However, in some cases, such as # when restoring from a checkpoint, we may set the _initializer_op # property on the entire `DistributedVariable`. self._initializer_op = None def is_initialized(self, name=None): """Identifies if all the component variables are initialized. Args: name: Name of the final `logical_and` op. Returns: The op that evaluates to True or False depending on if all the component variables are initialized. """ result = self.primary.is_initialized() # We iterate through the list of values except the last one to allow us to # name the final `logical_and` op the same name that is passed by the user # to the `is_initialized` op. For distributed variables, the # `is_initialized` op is a `logical_and` op. 
for v in self._values[1:-1]: result = math_ops.logical_and(result, v.is_initialized()) result = math_ops.logical_and(result, self._values[-1].is_initialized(), name=name) return result @property def initializer(self): if self._initializer_op: init_op = self._initializer_op else: # return grouped ops of all the var initializations of component values of # the mirrored variable init_op = control_flow_ops.group(tuple( v.initializer for v in self._values)) return init_op def _get_closest(self): """Return member in the same replica if possible, else the primary.""" replica_context = distribution_strategy_context.get_replica_context() if replica_context: return self._device_map.select_for_current_replica( self._values, replica_context) device = distribute_lib.get_update_device() if device is None: device = device_util.canonicalize(device_util.current()) replica_id = self._device_map.replica_for_device(device) if replica_id is None: return self.primary return self._values[replica_id] def initialized_value(self): return self._get_closest().initialized_value() @property def initial_value(self): return self._get_closest().initial_value @property def graph(self): return self.primary.graph @property def _shared_name(self): return self._common_name @property def _unique_id(self): return self.primary._unique_id # pylint: disable=protected-access @property def _graph_key(self): """Lets Optimizers know which graph this variable is from.""" return self.primary._graph_key # pylint: disable=protected-access @property def name(self): return self.primary.name @property def dtype(self): return self.primary.dtype @property def shape(self): return self.primary.shape @property def synchronization(self): return self.primary.synchronization @property def handle(self): device = None replica_context = distribution_strategy_context.get_replica_context() if replica_context is None: device = distribute_lib.get_update_device() if device is None: raise ValueError("`handle` is not available outside the replica context" " or a `tf.distribute.Strategy.update()` call.") return self.get(device=device).handle def eval(self, session=None): return self._get_closest().eval(session) @property def _save_slice_info(self): return self.primary._save_slice_info # pylint: disable=protected-access def _get_save_slice_info(self): return self.primary._get_save_slice_info() # pylint: disable=protected-access def _set_save_slice_info(self, save_slice_info): for v in self._values: v._set_save_slice_info(save_slice_info) # pylint: disable=protected-access @property def device(self): return self._get_closest().device @property def trainable(self): return self.primary.trainable @property def distribute_strategy(self): return self._distribute_strategy def get_shape(self): return self.primary.get_shape() def to_proto(self, export_scope=None): return self.primary.to_proto(export_scope=export_scope) @property def op(self): # We want cross-replica code that does some var.op.X calls # to work (even if the current device isn't in self.devices), but # other uses of var.op in a cross-replica context to fail. 
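    # Illustrative note (added, not in the original source): in cross-replica
    # context `var.op.name`, `var.op.graph` and `var.op.type` keep working
    # because the DistributedVarOp namedtuple carries exactly those three
    # fields, while anything else (e.g. `var.op.outputs`) raises
    # AttributeError.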
if distribution_strategy_context.in_cross_replica_context(): return DistributedVarOp(self.primary.op.name, self.primary.op.graph, self.primary.op.type) return self.get().op @property def _in_graph_mode(self): return self.primary._in_graph_mode # pylint: disable=protected-access def read_value(self): with _enter_or_assert_strategy(self._distribute_strategy): return array_ops.identity(self.get()) def value(self): return self._get_closest().value() def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass ops.register_dense_tensor_like_type(DistributedVariable) def _validate_colocate_extended(v, extended): variable_strategy = v._distribute_strategy # pylint: disable=protected-access if variable_strategy.extended is not extended: raise ValueError( "`colocate_vars_with` must only be passed a variable created in this " "tf.distribute.Strategy.scope(), not %s created in scope: %s" % (v, variable_strategy)) def validate_colocate_distributed_variable(v, extended): if not isinstance(v, DistributedVariable): raise ValueError( "`colocate_vars_with` must only be passed a variable created in this " "tf.distribute.Strategy.scope(), not: %r" % (v,)) _validate_colocate_extended(v, extended) def validate_colocate_tpu_variable(v, extended): if not isinstance(v, TPUMirroredVariable): raise ValueError( "`colocate_vars_with` must only be passed a variable created in this " "tf.distribute.Strategy.scope(), not: %r" % (v,)) _validate_colocate_extended(v, extended) def validate_colocate(v, extended): if not hasattr(v, "_distribute_strategy"): raise ValueError( "`colocate_vars_with` must only be passed a variable created in this " "tf.distribute.Strategy.scope(), not: %r" % (v,)) _validate_colocate_extended(v, extended) def _apply_aggregation(strategy, value, aggregation, destinations): if aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA: return strategy.extended.broadcast_to( strategy.experimental_local_results(value)[0], destinations=destinations) reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation) return strategy.extended.reduce_to(reduce_op, value, destinations) _aggregation_error_msg = ( "You must specify an aggregation method to update a " "{variable_type} in Replica Context. You can do so by passing " "an explicit value for argument `aggregation` to tf.Variable(..), " "e.g. `tf.Variable(..., aggregation=tf.VariableAggregation.SUM)`. " "`tf.VariableAggregation` lists the possible aggregation methods. " "This is required because {variable_type} should always be " "kept in sync. When updating them or assigning to them in a " "replica context, we automatically try to aggregate the values " "before updating the variable. For this aggregation, we need to " "know the aggregation method. " "Another alternative is to not try to update such " "{variable_type} in replica context, but in cross replica " "context. You can enter cross replica context by calling " "`tf.distribute.get_replica_context().merge_call(merge_fn, ..)`. "
"Inside `merge_fn`, you can then update the {variable_type} " "using `tf.distribute.StrategyExtended.update()`.") class _MirroredSaveable(saver.BaseSaverBuilder.ResourceVariableSaveable): """Class for defining how to restore a MirroredVariable.""" def __init__(self, mirrored_variable, primary_variable, name): self._mirrored_variable = mirrored_variable super(_MirroredSaveable, self).__init__(primary_variable, "", name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into all variables.""" tensor, = restored_tensors return control_flow_ops.group(tuple( _assign_on_device(v.device, v, tensor) for v in self._mirrored_variable.values)) class MirroredVariable(DistributedVariable, Mirrored): """Holds a map from device to variables whose values are kept in sync.""" def __init__( self, strategy, device_map, values, aggregation, logical_device=None): super(MirroredVariable, self).__init__( strategy, device_map, values, logical_device=logical_device) self._aggregation = aggregation # The arguments to update() are automatically unwrapped so the update() # function would normally see regular variables, not MirroredVariables. # However, the update function can still operate on wrapped MirroredVariables # through object members, captured arguments, etc. This is more likely in an # update_non_slot() function (like OptimizerV2._finish), which can # update several non-slot variables in one call. def _assign_func(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): f = kwargs.pop("f") if distribution_strategy_context.in_cross_replica_context(): update_device = distribute_lib.get_update_device() if update_device is not None: # We are calling an assign function on the mirrored variable in an # update context. v = self.get(device=update_device) return f(v, *args, **kwargs) # We are calling assign on the mirrored variable in cross replica # context, use `strategy.extended.update()` to update the variable. return self._distribute_strategy.extended.update( self, f, args=args, kwargs=kwargs) else: _assert_replica_context(self._distribute_strategy) # We are calling an assign function on the mirrored variable in replica # context. # We reduce the value we want to assign/add/sub. More details about how # we handle the different use cases can be found in the _reduce method. # We call the function on each of the mirrored variables with the # reduced value. 
if self._aggregation == vs.VariableAggregation.NONE: raise ValueError(_aggregation_error_msg.format( variable_type="MirroredVariable")) def merge_fn(strategy, value, *other_args, **other_kwargs): v = _apply_aggregation(strategy, value, self._aggregation, self) return strategy.extended.update( self, f, args=(v,) + other_args, kwargs=other_kwargs) return distribution_strategy_context.get_replica_context().merge_call( merge_fn, args=args, kwargs=kwargs) def assign_sub(self, *args, **kwargs): assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw) return self._assign_func(f=assign_sub_fn, *args, **kwargs) def assign_add(self, *args, **kwargs): assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw) return self._assign_func(f=assign_add_fn, *args, **kwargs) def assign(self, *args, **kwargs): assign_fn = lambda var, *a, **kw: var.assign(*a, **kw) return self._assign_func(f=assign_fn, *args, **kwargs) @property def aggregation(self): return self._aggregation def _get_cross_replica(self): device = device_util.canonicalize(device_util.current()) replica_id = self._device_map.replica_for_device(device) if replica_id is None: return array_ops.identity(self.primary) return array_ops.identity(self._values[replica_id]) def _as_graph_element(self): # pylint: disable=protected-access if distribution_strategy_context.in_cross_replica_context(): return self.primary._as_graph_element() return self.get()._as_graph_element() def _gather_saveables_for_checkpoint(self): """Overrides Trackable method. This allows both name-based and object-based save and restore of MirroredVariables. Returns: A dictionary mapping attribute names to `SaveableObject` factories. """ def _saveable_factory(name=self._common_name): return _MirroredSaveable(self, self.primary, name) return {trackable.VARIABLE_VALUE_KEY: _saveable_factory} def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts a variable to a tensor.""" # Try to avoid assignments to and other mutations of MirroredVariable # state except through a DistributionStrategy.extended.update() call. assert not as_ref return ops.internal_convert_to_tensor( self.get(), dtype=dtype, name=name, as_ref=as_ref) # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. 
def _tensor_conversion_mirrored(var, dtype=None, name=None, as_ref=False): return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref) # pylint: disable=protected-access ops.register_tensor_conversion_function(MirroredVariable, _tensor_conversion_mirrored) def _enclosing_tpu_context(): # pylint: disable=protected-access tpu_context = ops.get_default_graph()._get_control_flow_context() # pylint: enable=protected-access while tpu_context is not None and not isinstance( tpu_context, control_flow_ops.XLAControlFlowContext): tpu_context = tpu_context.outer_context return tpu_context def is_distributed_variable(v): """Determine if a variable is ds variable or TPU mirrored variable.""" return isinstance(v, DistributedVariable) class TPUMirroredVariable(MirroredVariable): """Holds a map from device to TPU variables whose values are kept in sync.""" def __init__( self, strategy, device_map, values, aggregation, logical_device=None): super(TPUMirroredVariable, self).__init__( strategy=strategy, device_map=device_map, values=values, aggregation=aggregation, logical_device=logical_device) # Handle id is needed for get_replicated_var_handle to cache the variables # correctly since in eager mode different variables can have the same name. if ops.executing_eagerly_outside_functions(): self._handle_id = self._common_name + "_" + str(id(self.primary)) else: self._handle_id = self._common_name def __getattr__(self, name): if _enclosing_tpu_context() is None: return super(TPUMirroredVariable, self).__getattr__(name) else: raise AttributeError( "'{}' not accessible within a TPU context.".format(name)) def get(self, device=None): if (_enclosing_tpu_context() is None) or (device is not None): return super(TPUMirroredVariable, self).get(device=device) else: raise NotImplementedError( "`TPUMirroredVariable.get()` is not supported within a TPU context.") def _get_as_operand(self): return self.read_value() def _get_closest(self): if _enclosing_tpu_context() is None: return super(TPUMirroredVariable, self)._get_closest() else: return self.primary def numpy(self): if context.executing_eagerly(): return self.read_value().numpy() raise NotImplementedError( "numpy() is only available when eager execution is enabled.") @property def handle(self): # If we're in a tpu.rewrite(), return the replicated handle. tpu_context = _enclosing_tpu_context() if tpu_context is None: return self._get_closest().handle else: return tpu_context.get_replicated_var_handle( self._handle_id, self._values) @property def device(self): return self.handle.device @contextlib.contextmanager def _handle_graph(self, handle): # Note: might have an eager tensor but not be executing eagerly when # building functions. 
if (context.executing_eagerly() or isinstance(handle, ops.EagerTensor) or ops.has_default_graph()): yield else: with handle.graph.as_default(): yield def _read_variable_op(self, parent_op=None): if self.trainable: tape.variable_accessed(self) if parent_op is not None: with ops.control_dependencies([parent_op]): return gen_resource_variable_ops.read_variable_op( self.handle, self.dtype) return gen_resource_variable_ops.read_variable_op( self.handle, self.dtype) def read_value(self): return self._read_variable_op() def _assign_func(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): if (distribution_strategy_context.in_cross_replica_context() and (_enclosing_tpu_context() is not None)): f = kwargs.pop("f") return self._distribute_strategy.extended.update( self, f, args=args, kwargs=kwargs) else: return super(TPUMirroredVariable, self)._assign_func(*args, **kwargs) def _make_raw_assign_fn(self, raw_assign_fn): def assign_fn(var, value, *args, **kwargs): del args name = kwargs.pop("name", None) read_value = kwargs.pop("read_value", True) with self._handle_graph(var.handle): op = raw_assign_fn( var.handle, ops.convert_to_tensor(value, dtype=self.dtype), name=name) return self._read_variable_op(parent_op=op) if read_value else op return assign_fn def assign_sub(self, *args, **kwargs): assign_sub_fn = self._make_raw_assign_fn( gen_resource_variable_ops.assign_sub_variable_op) return self._assign_func(f=assign_sub_fn, *args, **kwargs) def assign_add(self, *args, **kwargs): assign_add_fn = self._make_raw_assign_fn( gen_resource_variable_ops.assign_add_variable_op) return self._assign_func(f=assign_add_fn, *args, **kwargs) def assign(self, *args, **kwargs): assign_fn = self._make_raw_assign_fn( gen_resource_variable_ops.assign_variable_op) return self._assign_func(f=assign_fn, *args, **kwargs) @property def constraint(self): return self.primary.constraint def _as_graph_element(self): if _enclosing_tpu_context() is None: return super(TPUMirroredVariable, self)._as_graph_element() # pylint: disable=protected-access else: return None # Needed to pass ResourceVariable checks. @property def op(self): return self.primary.op def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False): """Converts a variable to a tensor.""" # pylint: disable=protected-access if _enclosing_tpu_context() is None: return super(TPUMirroredVariable, self)._dense_var_to_tensor( dtype, name, as_ref) # pylint: enable=protected-access if dtype is not None and dtype != self.dtype: return math_ops.cast(self.read_value(), dtype) if as_ref: return self.handle else: return self.read_value() class _SyncOnReadSaveable(saver.BaseSaverBuilder.SaveableObject): """Class for defining how to restore a SyncOnReadVariable.""" def __init__(self, sync_on_read_variable, name): self._sync_on_read_variable = sync_on_read_variable # We use a callable so that we don't have to evaluate this expression # in the case where we are trying to restore instead of save. 
def tensor(): strategy = sync_on_read_variable._distribute_strategy # pylint: disable=protected-access return strategy.extended.read_var(sync_on_read_variable) spec = saver.BaseSaverBuilder.SaveSpec( tensor=tensor, slice_spec="", name=name, dtype=sync_on_read_variable.dtype, device=sync_on_read_variable.primary.device) super(_SyncOnReadSaveable, self).__init__(tensor, [spec], name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into all variables.""" tensor, = restored_tensors return self._sync_on_read_variable.assign(tensor) def _assert_replica_context(strategy): replica_context = distribution_strategy_context.get_replica_context() if not replica_context: raise RuntimeError( "Replica-local variables may only be assigned in a replica context.") if replica_context.strategy is not strategy: raise RuntimeError( "Replica-local variables may only be assigned in a replica context.") class SyncOnReadVariable(DistributedVariable, PerReplica): """Holds a map from device to variables whose values are reduced on save.""" def __init__( self, strategy, device_map, values, aggregation, logical_device=None): self._aggregation = aggregation super(SyncOnReadVariable, self).__init__( strategy, device_map, values, logical_device=logical_device) def assign_sub(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): if distribution_strategy_context.in_cross_replica_context(): if self._aggregation == vs.VariableAggregation.SUM: raise ValueError( "SyncOnReadVariable does not support `assign_sub` in " "cross-replica context when aggregation is set to " "`tf.VariableAggregation.SUM`.") return control_flow_ops.group(tuple( _assign_sub_on_device(v.device, v, args[0]) for v in self._values)) else: return self.get().assign_sub(*args, **kwargs) def assign_add(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): if distribution_strategy_context.in_cross_replica_context(): if self._aggregation == vs.VariableAggregation.SUM: raise ValueError( "SyncOnReadVariable does not support `assign_add` in " "cross-replica context when aggregation is set to " "`tf.VariableAggregation.SUM`.") return control_flow_ops.group(tuple( _assign_add_on_device(v.device, v, args[0]) for v in self._values)) else: return self.get().assign_add(*args, **kwargs) def assign(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): if distribution_strategy_context.in_cross_replica_context(): # To preserve the sum across save and restore, we have to divide the # total across all devices when restoring a variable that was summed # when saving. tensor = args[0] if self._aggregation == vs.VariableAggregation.SUM: tensor *= 1. / len(self.devices) return control_flow_ops.group(tuple( _assign_on_device(v.device, v, tensor) for v in self._values)) else: return self.get().assign(*args, **kwargs) @property def aggregation(self): return self._aggregation def _get_cross_replica(self): if self._aggregation == vs.VariableAggregation.ONLY_FIRST_REPLICA: return self.primary return self._distribute_strategy.reduce( reduce_util.ReduceOp.from_variable_aggregation(self.aggregation), self, axis=None) def _as_graph_element(self): # pylint: disable=protected-access if distribution_strategy_context.in_cross_replica_context(): return self._get_cross_replica() return self.get()._as_graph_element() def _gather_saveables_for_checkpoint(self): """Overrides Trackable method. This allows both name-based and object-based save and restore of `SyncOnReadVariable`s. 
    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    def _saveable_factory(name=self._common_name):
      return _SyncOnReadSaveable(self, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}


# Register a conversion function for SyncOnReadVariable which allows as_ref to
# be true.
def _tensor_conversion_sync_on_read(var, dtype=None, name=None, as_ref=False):
  return ops.internal_convert_to_tensor(
      var.get(), dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(SyncOnReadVariable,
                                        _tensor_conversion_sync_on_read)


def regroup(device_map, values, wrap_class=PerReplica):
  """Converts a nest of per-replica values to a nest of PerReplica/Mirrored values."""
  assert isinstance(device_map, DeviceMap)
  assert len(values) == device_map.num_replicas_in_graph
  v0 = values[0]

  if isinstance(v0, list):
    for v in values[1:]:
      assert isinstance(v, list)
      assert len(v) == len(v0), ("len(v) == %d, len(v0) == %d, v: %s, v0: %s" %
                                 (len(v), len(v0), v, v0))
    return [regroup(device_map, tuple(v[i] for v in values), wrap_class)
            for i in range(len(v0))]

  if isinstance(v0, tuple):
    for v in values[1:]:
      assert isinstance(v, tuple)
      assert len(v) == len(v0)
    regrouped_tuple = tuple(
        regroup(device_map, tuple(v[i] for v in values), wrap_class)
        for i in range(len(v0)))
    if hasattr(v0, "_fields"):
      # This tuple is in fact a namedtuple! Create a new namedtuple instance
      # and initialize it with the regrouped values:
      assert hasattr(type(v0), "_make")
      return type(v0)._make(regrouped_tuple)
    else:
      return regrouped_tuple

  if isinstance(v0, dict):
    v0keys = set(v0.keys())
    for v in values[1:]:
      assert isinstance(v, dict), ("v[0]: %r v[i]: %r" % (v0, v))
      assert set(v.keys()) == v0keys, ("v[0].keys: %s v[i].keys: %s" %
                                       (v0keys, set(v.keys())))
    return {key: regroup(device_map, tuple(v[key] for v in values), wrap_class)
            for key in v0keys}

  # If exactly the same object across all devices, return it unwrapped.
  same_id = True
  for v in values[1:]:
    if v is not v0:
      same_id = False
      break
  # Consider three cases where same_id is true:
  # * If v0 is a DistributedVariable (a MirroredVariable or
  #   SyncOnReadVariable, and same_id means it is the same across all
  #   devices), we want to return it. We check DistributedVariable
  #   specifically since it can look like it has a
  #   _distributed_container member since its members do.
  # * If v0 is a member of a distributed variable, in which case
  #   hasattr(v0, "_distributed_container") is true, we want to
  #   return the DistributedVariable that contains it using the
  #   _distributed_container logic below. This case can trigger
  #   same_id when there is only one device.
  # * In any other situation, same_id means we return v0.
  if same_id and (isinstance(v0, DistributedVariable) or
                  not hasattr(v0, "_distributed_container")):
    return v0

  # Detect the case where each device has a parallel component of the
  # same MirroredVariable (or SyncOnReadVariable). In this case we
  # want to return the containing MirroredVariable, after a bunch of
  # sanity checking. In particular, each component should have the
  # same container, and the devices of the variables should match the
  # keys of the per-replica dictionary.
if hasattr(v0, "_distributed_container"): # pylint: disable=protected-access assert not isinstance(v0, MirroredVariable), ( "ids = %s, values = %s" % ([id(v) for v in values], values)) assert device_map.is_device_in_replica(v0.device, 0), ( "v0.device = %s, device_map = %s" % (v0.device, device_map)) distributed_container = v0._distributed_container() assert distributed_container is not None for r, v in enumerate(values[1:]): assert device_map.is_device_in_replica(v.device, r + 1), ( "v.device = %s, r = %d, device_map = %s" % (v.device, r + 1, device_map)) assert distributed_container is v._distributed_container() return distributed_container # pylint: enable=protected-access return wrap_class(device_map, values) def select_replica(replica_id, structured): """Specialize a nest of regular & per-replica values for one replica.""" def _get(x): return x.values[replica_id] if isinstance(x, DistributedValues) else x return nest.map_structure(_get, structured) def select_device_mirrored(device, structured): """Specialize a nest of regular & mirrored values for one device.""" def _get_mirrored(x): if isinstance(x, DistributedValues): if not isinstance(x, Mirrored): raise TypeError( "Expected value to be mirrored across replicas: %s in %s." % (x, structured)) return x.get(device) else: return x return nest.map_structure(_get_mirrored, structured) def update_regroup(extended, device_map, updates, group): """Regroup for an update, with dependencies to ensure all updates execute.""" # TODO(josh11b): Replace "Mirrored" here with a function that does the following # so we can avoid all these nest operations. regrouped = regroup(device_map, updates, Mirrored) if not group: return nest.map_structure(extended._local_results, regrouped) # pylint: disable=protected-access grouped_flat = [] for u in nest.flatten(regrouped): if isinstance(u, DistributedValues): g = extended._group(u) # pylint: disable=protected-access if u.is_tensor_like: # Make sure we run all updates. Without this, something like # session.run(extended.update(...)) may only update one replica. values = [] for d in u.devices: with ops.device(d), ops.control_dependencies([g]): values.append(array_ops.identity(u.get(d))) g = Mirrored(u.device_map, values) else: g = u grouped_flat.append(g) return nest.pack_sequence_as(regrouped, grouped_flat) def value_container(val): """Returns the container that this per-replica `value` belongs to. Args: val: A value returned by `call_for_each_replica()` or a variable created in `scope()`. Returns: A container that `value` belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself. """ if (hasattr(val, "_distributed_container") and # DistributedVariable has _distributed_container defined # but we don't want to return it. not isinstance(val, DistributedVariable)): container = val._distributed_container() # pylint: disable=protected-access if container is not None: return container return val class AggregatingVariable(variables_lib.Variable): """A wrapper around a variable that aggregates updates across replicas.""" def __init__(self, strategy, v, aggregation): self._distribute_strategy = strategy self._v = v # NOTE: We don't use "_distributed_container" here because we don't want # to trigger that code path in regroup(). 
v._aggregating_container = weakref.ref(self) # pylint: disable=protected-access self._aggregation = aggregation def get(self): return self._v @property def distribute_strategy(self): return self._distribute_strategy def __getattr__(self, name): return getattr(self._v, name) def _assign_func(self, *args, **kwargs): with _enter_or_assert_strategy(self._distribute_strategy): f = kwargs.pop("f") if distribution_strategy_context.in_cross_replica_context(): update_device = distribute_lib.get_update_device() if update_device is not None: # We are calling an assign function in an update context. return f(self._v, *args, **kwargs) # We are calling an assign function in cross replica context, wrap it in # an update call. return self._distribute_strategy.extended.update( self, f, args=args, kwargs=kwargs) else: replica_context = distribution_strategy_context.get_replica_context() assert replica_context # We are calling an assign function in replica context. # We reduce the value we want to assign/add/sub. More details about how # we handle the different use cases can be found in the _reduce method. # We call the function with the reduced value. if self._aggregation == vs.VariableAggregation.NONE: raise ValueError(_aggregation_error_msg.format( variable_type="AggregatingVariable")) def merge_fn(strategy, value, *other_args, **other_kwargs): v = _apply_aggregation(strategy, value, self._aggregation, self) return strategy.extended.update( self, f, args=(v,) + other_args, kwargs=other_kwargs) return replica_context.merge_call(merge_fn, args=args, kwargs=kwargs) def assign_sub(self, *args, **kwargs): assign_sub_fn = lambda var, *a, **kw: var.assign_sub(*a, **kw) return self._assign_func(f=assign_sub_fn, *args, **kwargs) def assign_add(self, *args, **kwargs): assign_add_fn = lambda var, *a, **kw: var.assign_add(*a, **kw) return self._assign_func(f=assign_add_fn, *args, **kwargs) def assign(self, *args, **kwargs): assign_fn = lambda var, *a, **kw: var.assign(*a, **kw) return self._assign_func(f=assign_fn, *args, **kwargs) @property def initializer(self): return self._v.initializer def initialized_value(self): return self._v.initialized_value() @property def initial_value(self): return self._v.initial_value @property def op(self): return self._v.op def read_value(self): return self._v.read_value() def eval(self, session=None): return self._v.eval(session) @property def graph(self): return self._v.graph @property def device(self): return self._v.device @property def shape(self): return self._v.shape @property def aggregation(self): return self._aggregation @property def name(self): return self._v.name @property def dtype(self): return self._v.dtype # TODO(josh11b): Test saving & restoring. 
def _gather_saveables_for_checkpoint(self): return {trackable.VARIABLE_VALUE_KEY: self._v} # pylint: disable=multiple-statements def __add__(self, o): return self._v + o def __radd__(self, o): return o + self._v def __sub__(self, o): return self._v - o def __rsub__(self, o): return o - self._v def __mul__(self, o): return self._v * o def __rmul__(self, o): return o * self._v def __truediv__(self, o): return self._v / o def __rtruediv__(self, o): return o / self._v def __floordiv__(self, o): return self._v // o def __rfloordiv__(self, o): return o // self._v def __mod__(self, o): return self._v % o def __rmod__(self, o): return o % self._v def __lt__(self, o): return self._v < o def __le__(self, o): return self._v <= o def __gt__(self, o): return self._v > o def __ge__(self, o): return self._v >= o def __and__(self, o): return self._v & o def __rand__(self, o): return o & self._v def __or__(self, o): return self._v | o def __ror__(self, o): return o | self._v def __xor__(self, o): return self._v ^ o def __rxor__(self, o): return o ^ self._v def __getitem__(self, o): return self._v[o] def __pow__(self, o, modulo=None): return pow(self._v, o, modulo) def __rpow__(self, o): return pow(o, self._v) def __invert__(self): return ~self._v def __neg__(self): return -self._v def __abs__(self): return abs(self._v) def __div__(self, o): try: return self._v.__div__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rdiv__(self, o): try: return self._v.__rdiv__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __matmul__(self, o): try: return self._v.__matmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __rmatmul__(self, o): try: return self._v.__rmatmul__(o) except AttributeError: # See https://docs.python.org/3/library/constants.html#NotImplemented return NotImplemented def __str__(self): return str(self._v) def __repr__(self): return repr(self._v) def _should_act_as_resource_variable(self): """Pass resource_variable_ops.is_resource_variable check.""" pass # Register a conversion function which reads the value of the variable, # allowing instances of the class to be used as tensors. def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False): return ops.internal_convert_to_tensor( var.get(), dtype=dtype, name=name, as_ref=as_ref) ops.register_tensor_conversion_function( AggregatingVariable, _tensor_conversion_aggregate) ops.register_dense_tensor_like_type(AggregatingVariable)
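# A minimal sketch (illustrative only; `_ValueWrapper` is hypothetical, not a
# TensorFlow class) of the conversion-function pattern used just above for
# AggregatingVariable: once a conversion function is registered for a wrapper
# class, ordinary ops accept instances of the wrapper wherever a tensor is
# expected.
class _ValueWrapper(object):
  """Hypothetical wrapper holding a single value."""

  def __init__(self, value):
    self._value = value

  def get(self):
    return self._value


def _tensor_conversion_value_wrapper(var, dtype=None, name=None, as_ref=False):
  del as_ref  # Wrappers in this sketch are never used by reference.
  return ops.internal_convert_to_tensor(var.get(), dtype=dtype, name=name)


ops.register_tensor_conversion_function(_ValueWrapper,
                                        _tensor_conversion_value_wrapper)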
tensorflow-master
tensorflow/python/distribute/values.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for reduce operations."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import enum

from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export


@tf_export("distribute.ReduceOp")
class ReduceOp(enum.Enum):
  """Indicates how a set of values should be reduced.

  * `SUM`: Add all the values.
  * `MEAN`: Take the arithmetic mean ("average") of the values.
  """
  # TODO(priyag): Add the following types:
  # `MIN`: Return the minimum of all values.
  # `MAX`: Return the maximum of all values.

  SUM = "SUM"
  MEAN = "MEAN"

  @staticmethod
  def from_variable_aggregation(aggregation):
    mapping = {
        variable_scope.VariableAggregation.SUM: ReduceOp.SUM,
        variable_scope.VariableAggregation.MEAN: ReduceOp.MEAN,
    }

    reduce_op = mapping.get(aggregation)
    if not reduce_op:
      raise ValueError("Could not convert from `tf.VariableAggregation` %s to "
                       "`tf.distribute.ReduceOp` type" % aggregation)
    return reduce_op
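# A brief usage sketch, appended for illustration only: the two supported
# `tf.VariableAggregation` values map onto the matching
# `tf.distribute.ReduceOp`; anything else makes `from_variable_aggregation`
# raise ValueError.
assert (ReduceOp.from_variable_aggregation(
    variable_scope.VariableAggregation.MEAN) == ReduceOp.MEAN)
assert (ReduceOp.from_variable_aggregation(
    variable_scope.VariableAggregation.SUM) == ReduceOp.SUM)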
tensorflow-master
tensorflow/python/distribute/reduce_util.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for training.moving_averages when using a DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.training import moving_averages all_combinations = combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy, ], mode=["graph"]) class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase): @combinations.generate(all_combinations) def testReplicaModeWithoutZeroDebias(self, distribution): replica_id = [0] def replica_fn(): var = variables.Variable([10.0, 11.0]) val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]]) replica_id[0] += 1 decay = 0.25 assign = moving_averages.assign_moving_average( var, val, decay, zero_debias=False) return var, assign with distribution.scope(), self.cached_session() as sess: var, assign = distribution.extended.call_for_each_replica(replica_fn) variables.global_variables_initializer().run() self.assertAllClose([10.0, 11.0], var.eval()) sess.run(distribution.experimental_local_results(assign)) # Mean of val across calls to replica_fn(). average_val = [1.0 + 0.5 * (replica_id[0] - 1), 2.0 - 0.5 * (replica_id[0] - 1)] val_weight = 1.0 - 0.25 self.assertAllClose( [10.0 * 0.25 + average_val[0] * val_weight, 11.0 * 0.25 + average_val[1] * val_weight], var.eval()) @combinations.generate(all_combinations) def testReplicaMode(self, distribution): replica_id = [0] def replica_fn(): var = variables.Variable([0.0, 0.0]) val = constant_op.constant([1.0 + replica_id[0], 2.0 - replica_id[0]]) replica_id[0] += 1 decay = 0.25 assign = moving_averages.assign_moving_average(var, val, decay) return var, assign.op with distribution.scope(), self.cached_session() as sess: var, assign_op = distribution.extended.call_for_each_replica(replica_fn) variables.global_variables_initializer().run() self.assertAllClose([0.0, 0.0], var.eval()) sess.run(distribution.experimental_local_results(assign_op)) # Mean of val across calls to replica_fn(). 
      average_val = [1.0 + 0.5 * (replica_id[0] - 1),
                     2.0 - 0.5 * (replica_id[0] - 1)]
      self.assertAllClose(average_val, var.eval())

  @combinations.generate(all_combinations)
  def testCrossDeviceWithoutZeroDebias(self, distribution):
    with distribution.scope(), self.cached_session() as sess:
      var = variables.Variable([10.0, 11.0])
      val = constant_op.constant([1.0, 2.0])
      decay = 0.25
      # NOTE(josh11b): We currently generate an error if val is a PerReplica
      # value.
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)

      variables.global_variables_initializer().run()
      self.assertAllClose([10.0, 11.0], var.eval())
      sess.run(assign)
      average_val = [1.0, 2.0]
      val_weight = 1.0 - 0.25
      self.assertAllClose(
          [10.0 * 0.25 + average_val[0] * val_weight,
           11.0 * 0.25 + average_val[1] * val_weight],
          var.eval())
      # Also try assign.op.
      sess.run(assign.op)
      orig_weight = 0.25 * 0.25
      val_weight = 1.0 - orig_weight
      self.assertAllClose(
          [10.0 * orig_weight + average_val[0] * val_weight,
           11.0 * orig_weight + average_val[1] * val_weight],
          var.eval())

  @combinations.generate(all_combinations)
  def testCrossDevice(self, distribution):
    with distribution.scope(), self.cached_session() as sess:
      var = variables.Variable([0.0, 0.0])
      val = array_ops.placeholder(dtypes.float32)
      decay = 0.25
      # NOTE(josh11b): We currently generate an error if val is a PerReplica
      # value.
      assign = moving_averages.assign_moving_average(var, val, decay)

      variables.global_variables_initializer().run()
      self.assertAllClose([0.0, 0.0], var.eval())
      sess.run(assign, feed_dict={val: [1.0, 2.0]})
      self.assertAllClose([1.0, 2.0], var.eval())

      # Also try assign.op.
      sess.run(assign.op, feed_dict={val: [10.0, 0.0]})
      self.assertAllClose(
          [(1.0 * 0.25 + 10.0) / (1.0 * 0.25 + 1.0),
           (2.0 * 0.25 + 0.0) / (1.0 * 0.25 + 1.0)],
          var.eval())

  @combinations.generate(all_combinations)
  def testAssignVariable(self, distribution):

    def replica_fn():
      var = variables.Variable([10.0, 11.0])
      # Here we test the case where the input value is itself a variable.
      val = variables.Variable([1., 2.])
      decay = 0.25
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)
      return var, assign

    with distribution.scope(), self.cached_session() as sess:
      var, assign = distribution.extended.call_for_each_replica(replica_fn)
      variables.global_variables_initializer().run()
      self.assertAllClose([10.0, 11.0], var.eval())
      sess.run(distribution.experimental_local_results(assign))
      self.assertAllClose(
          [10 * 0.25 + 1. * (1 - 0.25), 11 * 0.25 + 2. * (1 - 0.25)],
          var.eval())


if __name__ == "__main__":
  test.main()
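# For reference, a standalone sketch (plain Python, no TensorFlow required) of
# the arithmetic the assertions above rely on: with zero_debias=False,
# `assign_moving_average(var, val, decay)` updates
#   var := var * decay + val * (1 - decay).
_var, _val, _decay = [10.0, 11.0], [1.0, 2.0], 0.25
_updated = [v * _decay + x * (1.0 - _decay) for v, x in zip(_var, _val)]
assert _updated == [3.25, 4.25]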
tensorflow-master
tensorflow/python/distribute/moving_averages_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for multi-worker distribution strategies."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os

from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.training import server_lib


def normalize_cluster_spec(cluster_spec):
  """Makes `cluster_spec` into a `ClusterSpec` object.

  Args:
    cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
      cluster configurations.

  Returns:
    a `ClusterSpec` object.

  Raises:
    ValueError: if `cluster_spec` is not a dict or a `ClusterSpec` or a
      `ClusterDef`.
  """
  if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)):
    return server_lib.ClusterSpec(cluster_spec)
  elif not isinstance(cluster_spec, server_lib.ClusterSpec):
    raise ValueError(
        "`cluster_spec` should be a dict or a `tf.train.ClusterSpec` or a "
        "`tf.train.ClusterDef` object")
  return cluster_spec


# TODO(yuefengz): add more validations.
def _validate_cluster_spec(cluster_spec, task_type, task_id):
  """Validates `cluster_spec`.

  It checks:
  0) None of `cluster_spec`, `task_type`, and `task_id` is `None`.
  1) task type is one of "chief", "worker", "evaluator" or "ps".
  2) whether there is such a task type as `task_type` in the `cluster_spec`.
  3) whether there is at most one "chief" job.
  4) whether there is at most one "evaluator" job.
  5) whether the `task_id` is smaller than the number of tasks for that
     particular `task_type`.

  Args:
    cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
    task_type: string indicating the type of the task.
    task_id: the id of the `task_type` in this cluster.

  Raises:
    ValueError: if `cluster_spec` fails any check.
  """
  if cluster_spec is None or task_type is None or task_id is None:
    raise ValueError(
        "None of `cluster_spec`, `task_type`, and `task_id` should be `None`.")

  cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()
  if task_type not in ("chief", "worker", "evaluator", "ps"):
    raise ValueError(
        "Unrecognized task_type: %r, valid task types are: \"chief\", "
        "\"worker\", \"evaluator\" and \"ps\"." % task_type)

  if task_type and task_type not in cluster_spec:
    raise ValueError("`task_type` %r not found in cluster_spec." % task_type)

  if len(cluster_spec.get("chief", [])) > 1:
    raise ValueError("There must be at most one 'chief' job.")

  if len(cluster_spec.get("evaluator", [])) > 1:
    raise ValueError("There must be at most one 'evaluator' job.")

  if task_id >= len(cluster_spec[task_type]):
    raise ValueError(
        "The `task_id` %d exceeds the maximum id of %s." % (task_id, task_type))


def is_chief(cluster_spec=None, task_type=None, task_id=None):
  """Returns whether the given task is chief in the cluster.
  Since there is at most one evaluator and the evaluator itself should be
  independent of the training cluster, the evaluator job is also a chief job
  on its own.

  If this is currently running under a `_WorkerContext` of distribute
  coordinator, the arguments can be omitted as the result is already available.

  Args:
    cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the
      cluster configurations.
    task_type: the task type in the cluster.
    task_id: the task id in the cluster.

  Returns:
    a boolean indicating whether the given task is chief.

  Raises:
    ValueError: if `task_type` is not in the `cluster_spec` or `task_id`
      exceeds the maximum id of the `task_type`.
  """
  if has_worker_context():
    # If a worker context exists, use the value provided by it.
    return dc_context.get_current_worker_context().is_chief

  _validate_cluster_spec(cluster_spec, task_type, task_id)
  cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()

  if task_type == "chief" or task_type == "evaluator":
    return True

  # If chief not in the cluster_spec, use the first worker as chief. This is
  # common in CollectiveAllReduceStrategy.
  if ("chief" not in cluster_spec and task_type == "worker" and task_id == 0):
    return True

  return False


def collective_leader(cluster_spec, task_type, task_id):
  """Returns the job name for the leader of collective ops.

  Args:
    cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the
      cluster configurations.
    task_type: the task type in the cluster.
    task_id: the task id in the cluster.

  Returns:
    a string indicating the leader job name or empty string if no need to set
    leader job.
  """
  cluster_spec = normalize_cluster_spec(cluster_spec)

  # No need to set collective leader for local.
  if not cluster_spec.as_dict():
    return ""

  _validate_cluster_spec(cluster_spec, task_type, task_id)

  # Only one evaluator, so no need to set collective leader.
  if task_type == "evaluator":
    return ""

  # Use chief if chief is in the cluster.
  if "chief" in cluster_spec.jobs:
    return "/job:chief/replica:0/task:0"

  # Use worker 0 if no chief job.
  assert "worker" in cluster_spec.jobs
  return "/job:worker/replica:0/task:0"


def worker_count(cluster_spec, task_type):
  """Returns the number of workers in the cluster."""
  _validate_cluster_spec(cluster_spec, task_type, task_id=0)
  cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()

  # Other jobs such as "ps" shouldn't call this function.
  if task_type not in ["chief", "worker", "evaluator"]:
    raise ValueError("Unexpected `task_type` %r" % task_type)

  if task_type == "evaluator":
    # The "evaluator" is in its own cluster or its own partition of a cluster.
    # So we don't have to count "chief" or "worker" if the current task is an
    # "evaluator".
    return len(cluster_spec["evaluator"])
  else:
    # In the non-evaluator case, we return the total number of "chief" and
    # "worker" tasks as the "chief" is also a worker.
    return (len(cluster_spec.get("chief", [])) + len(
        cluster_spec.get("worker", [])))


def id_in_cluster(cluster_spec, task_type, task_id):
  """Returns a unique id for the task in the `task_type`'s cluster.

  It returns an id ranging from [0, `worker_count(cluster_spec, task_type)`).

  Note: this function assumes that the "evaluator" job is in its own cluster
  or its own partition of a cluster.

  Args:
    cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object to be validated.
    task_type: string indicating the type of the task.
    task_id: the id of the `task_type` in this cluster.

  Returns:
    an int indicating the unique id.

  Raises:
    ValueError: if `task_type` is not "chief", "worker" or "evaluator".
""" _validate_cluster_spec(cluster_spec, task_type, task_id) cluster_spec = normalize_cluster_spec(cluster_spec).as_dict() # The "chief" job has always id 0 and there is at most one and "worker" jobs # come after it. if task_type == "chief": return 0 if task_type == "worker": return task_id + len(cluster_spec.get("chief", [])) # The "evaluator" is in its own cluster or its own partition of a cluster. if task_type == "evaluator": return task_id # We currently don't assign ids to other tasks. raise ValueError("There is no id for task_type %r" % task_type) def in_multi_worker_mode(): """Whether the program is operating in Multi-Worker setting.""" # TODO(rchao): Consider a warning if user uses multiple `model` method # calls in multi-worker setting. tf_config = json.loads(os.environ.get("TF_CONFIG", "{}")) cluster_spec = server_lib.ClusterSpec(tf_config.get("cluster", {})) return tf_config and "master" not in cluster_spec.jobs def should_save_checkpoint(): """Returns whether the current worker should save checkpoints. In multi-worker training, if saving checkpoint is requested by user, or needed for fault-tolerance, the cluster should save checkpoint but not necessarily every worker in the cluster should. Returns: Whether this particular worker in the cluster should save checkpoints. """ return dc_context.get_current_worker_context().should_checkpoint def should_load_checkpoint(): """Returns whether the current worker should load checkpoints. In multi-worker training, if loading checkpoint is requested by user, or needed for fault-tolerance, the cluster should load checkpoint but not necessarily every worker in the cluster should. Returns: Whether this particular worker in the cluster should load checkpoints. """ return dc_context.get_current_worker_context().experimental_should_init def wait_for_other_workers(): """Waits for other workers to reach the same call to this method.""" return dc_context.get_current_worker_context().wait_for_other_workers() def has_worker_context(): """Returns whether a worker context has been entered.""" return dc_context.get_current_worker_context() is not None
tensorflow-master
tensorflow/python/distribute/multi_worker_util.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility to re-use variables created on first device on subsequent devices.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re _VARIABLE_UNIQUIFYING_REGEX = re.compile(r"_\d/") _VARIABLE_UNIQUIFYING_REGEX_AT_END = re.compile(r"_\d$") def _canonicalize_variable_name(name): # If no name is specified, uses default name "Variable". if name is None: return "Variable" # Replace all instances of "_<num>/" with "/" name = _VARIABLE_UNIQUIFYING_REGEX.sub("/", name) # Replace any instances of "_<num>" at the end of the string with "" name = _VARIABLE_UNIQUIFYING_REGEX_AT_END.sub("", name) return name def make_fn(shared_variable_store, device_id): """Construct the variable creator function for device `device_id`. Constructs custom variable creator functions for the given device. On first device (device_id == 0), it creates the variable using the `next_creator`, and stores it in the provided `shared_variable_store`. On all other devices (device_id > 0), it tries to re-use the variable already created with the same name. If no such variable exists, it throws an error. Additionally, we de-uniquify variable names before checking for matches. This helps re-use variables which are intended to be the same but have different names due to variable uniquification happening upstream. Since this might mean we may have multiple variables with the same canonical name, we store them in a list per canonical name and return them in the same order as well. Args: shared_variable_store: A dictionary that we will use to store variables created on the first device, and re-used by creators for other devices. device_id: Integer index of the device whose creator should be constructed. Returns: An appropriate creator function based on device_id. """ variable_scope_access_index = {} assert isinstance(device_id, int) def create_new_variable(next_creator, *args, **kwargs): """Create the variable using `next_creator` and store it.""" canonical_name = _canonicalize_variable_name(kwargs.get("name")) v = next_creator(*args, **kwargs) if canonical_name not in shared_variable_store: shared_variable_store[canonical_name] = [] shared_variable_store[canonical_name].append(v) return v def reuse_variable(next_creator, *args, **kwargs): """Re-use existing variable from store with same name (in order).""" del next_creator, args name = kwargs.get("name") canonical_name = _canonicalize_variable_name(name) try: variable_index = variable_scope_access_index.get(canonical_name, 0) v = shared_variable_store[canonical_name][variable_index] # TODO(priyag): Make this variable re-use more robust by adding checks # that the requested shape and dtype match the existing variable. 
variable_scope_access_index[canonical_name] = variable_index + 1 return v except (KeyError, IndexError): raise RuntimeError( "Tried to create variable {} with mismatching name on device {}". format(name, device_id)) if device_id == 0: return create_new_variable else: return reuse_variable
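# A minimal usage sketch for `make_fn` (illustrative only; it assumes an
# environment where creating variables is legal, e.g. eager mode or inside a
# graph). Device 0 records variables in the shared store; device 1 re-uses
# them by canonical name, ignoring its own initializer.
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables

_store = {}
with variable_scope.variable_creator_scope(make_fn(_store, 0)):
  _v0 = variables.Variable(1.0, name="w")  # Created and recorded under "w".
with variable_scope.variable_creator_scope(make_fn(_store, 1)):
  _v1 = variables.Variable(1.0, name="w")  # Re-used from the store.
assert _v0 is _v1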
tensorflow-master
tensorflow/python/distribute/shared_variable_creator.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for input pipeline modifications for distribution strategies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.distribute import input_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.lib.io import python_io from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.util import compat class AutoShardDatasetTest(test.TestCase): def setUp(self): super(AutoShardDatasetTest, self).setUp() self._num_files = 10 self._num_records = 4 self._num_shards = 2 self._shard_index = 0 self._record_bytes = 10 def _getNext(self, dataset): if context.executing_eagerly(): iterator = iter(dataset) return iterator._next_internal # pylint: disable=protected-access else: iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() return lambda: get_next def _record(self, r, f): return compat.as_bytes("Record %d of file %d" % (r, f)) def _text_line(self, r, f): return compat.as_bytes("Text line %d of file %d" % (r, f)) def _fixed_length_record(self, r, f): return compat.as_bytes(str((r * f) % 10) * self._record_bytes) def _createTFRecordFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i) filenames.append(fn) writer = python_io.TFRecordWriter(fn) for j in range(self._num_records): record = self._record(j, i) writer.write(record) writer.close() return filenames def _createTextFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i) filenames.append(fn) contents = [] for j in range(self._num_records): contents.append(self._text_line(j, i)) if j + 1 != self._num_records or i == 0: contents.append(b"\r\n") contents = b"".join(contents) with open(fn, "wb") as f: f.write(contents) return filenames def _createFixedLengthRecordFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i) filenames.append(fn) with open(fn, "wb") as f: for j in range(self._num_records): f.write(self._fixed_length_record(j, i)) return filenames def _verifySimpleShardingOutput(self, dataset, record_fn): next_element_fn = self._getNext(dataset) with self.cached_session(): for f in range(self._shard_index, self._num_files, self._num_shards): for r in range(self._num_records): self.assertAllEqual(record_fn(r, f), self.evaluate(next_element_fn())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element_fn()) 
@test_util.run_in_graph_and_eager_modes def testTFRecordDataset(self): dataset = readers.TFRecordDataset(self._createTFRecordFiles()) dataset = input_ops.auto_shard_dataset( dataset, self._num_shards, self._shard_index) self._verifySimpleShardingOutput(dataset, self._record) @test_util.run_in_graph_and_eager_modes def testFlatMap(self): dataset = dataset_ops.Dataset.from_tensor_slices( self._createTFRecordFiles()) dataset = dataset.flat_map(readers.TFRecordDataset) dataset = input_ops.auto_shard_dataset( dataset, self._num_shards, self._shard_index) self._verifySimpleShardingOutput(dataset, self._record) @test_util.run_in_graph_and_eager_modes def testInterleave(self): dataset = dataset_ops.Dataset.from_tensor_slices( self._createTFRecordFiles()) dataset = dataset.interleave( readers.TFRecordDataset, cycle_length=4, block_length=self._num_records) dataset = input_ops.auto_shard_dataset( dataset, self._num_shards, self._shard_index) # Since block_length == num records in each file, the output will still # contain records in order of files. self._verifySimpleShardingOutput(dataset, self._record) @test_util.run_in_graph_and_eager_modes def testListfiles(self): filenames = self._createTFRecordFiles() file_pattern = filenames[0].rsplit(os.sep, 1)[0] + "/tf_record.*.txt" dataset = dataset_ops.Dataset.list_files(file_pattern, shuffle=False) dataset = dataset.flat_map(readers.TFRecordDataset) dataset = input_ops.auto_shard_dataset( dataset, self._num_shards, self._shard_index) next_element_fn = self._getNext(dataset) actual, expected = [], [] for f in range(self._shard_index, self._num_files, self._num_shards): for r in range(self._num_records): actual.append(self.evaluate(next_element_fn())) expected.append(self._record(r, f)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element_fn()) self.assertAllEqual(expected, actual) @test_util.run_in_graph_and_eager_modes def testComplexPipeline(self): # Setup a complex input pipeline. batch_size = 2 num_epochs = 5 dataset = dataset_ops.Dataset.from_tensor_slices( self._createTFRecordFiles()) dataset = dataset.shuffle(buffer_size=self._num_files) dataset = dataset.flat_map(readers.TFRecordDataset) dataset = dataset.prefetch(buffer_size=batch_size) dataset = dataset.shuffle(2 * self._num_files * self._num_records) dataset = dataset.repeat(num_epochs) dataset = dataset.map(lambda x: x) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(buffer_size=None) # Auto shard. dataset = input_ops.auto_shard_dataset( dataset, self._num_shards, self._shard_index) # Verify output. 
    next_element_fn = self._getNext(dataset)
    actual = []
    num_iterations = (self._num_files * self._num_records * num_epochs) // (
        self._num_shards * batch_size)
    for _ in range(num_iterations):
      actual.extend(self.evaluate(next_element_fn()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element_fn())

    expected = []
    for f in range(0, self._num_files, self._num_shards):
      for r in range(self._num_records):
        expected.append(self._record(r, f))
    expected *= num_epochs

    self.assertAllEqual(sorted(expected), sorted(actual))

  @test_util.run_in_graph_and_eager_modes
  def testZip(self):
    dataset1 = readers.TFRecordDataset(self._createTFRecordFiles())
    dataset2 = readers.TextLineDataset(self._createTextFiles())
    dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)

    record_fn = lambda r, f: (self._record(r, f), self._text_line(r, f))
    self._verifySimpleShardingOutput(dataset, record_fn)

  @test_util.run_in_graph_and_eager_modes
  def testConcat(self):
    dataset1 = readers.TFRecordDataset(self._createTFRecordFiles())
    dataset2 = readers.TextLineDataset(self._createTextFiles())
    dataset = dataset1.concatenate(dataset2)
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)
    next_element_fn = self._getNext(dataset)
    for f in range(self._shard_index, self._num_files, self._num_shards):
      for r in range(self._num_records):
        self.assertAllEqual(
            self._record(r, f), self.evaluate(next_element_fn()))
    for f in range(self._shard_index, self._num_files, self._num_shards):
      for r in range(self._num_records):
        self.assertAllEqual(
            self._text_line(r, f), self.evaluate(next_element_fn()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element_fn())

  @test_util.run_in_graph_and_eager_modes
  def testTextLineReader(self):
    dataset = readers.TextLineDataset(self._createTextFiles())
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)
    self._verifySimpleShardingOutput(dataset, self._text_line)

  @test_util.run_in_graph_and_eager_modes
  def testTextLineReaderWithFlatMap(self):
    dataset = readers.TextLineDataset(self._createTextFiles())
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)
    self._verifySimpleShardingOutput(dataset, self._text_line)

  @test_util.run_in_graph_and_eager_modes
  def testFixedLengthReaderWithFlatMap(self):
    dataset = readers.FixedLengthRecordDataset(
        self._createFixedLengthRecordFiles(), self._record_bytes)
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)
    self._verifySimpleShardingOutput(dataset, self._fixed_length_record)


# A dataset that creates two variant tensors.
class _TestDataset(dataset_ops.UnaryUnchangedStructureDataset):

  def __init__(self, input_dataset):
    self._input_dataset = input_dataset
    temp_variant_tensor = gen_dataset_ops.prefetch_dataset(
        input_dataset._variant_tensor,
        buffer_size=1,
        **dataset_ops.flat_structure(self))
    variant_tensor = gen_dataset_ops.model_dataset(
        temp_variant_tensor, **dataset_ops.flat_structure(self))
    super(_TestDataset, self).__init__(input_dataset, variant_tensor)


class CloneDatasetTest(test.TestCase):

  def _assert_datasets_equal(self, ds1, ds2):
    # First let's assert the structure is the same.
    self.assertTrue(
        ds1._element_structure.is_compatible_with(ds2._element_structure))
    self.assertTrue(
        ds2._element_structure.is_compatible_with(ds1._element_structure))

    # Now create iterators on both and assert they produce the same values.
it1 = dataset_ops.make_initializable_iterator(ds1) it2 = dataset_ops.make_initializable_iterator(ds2) get_next1 = it1.get_next() get_next2 = it2.get_next() with self.cached_session(): self.evaluate([it1.initializer, it2.initializer]) val1, val2 = self.evaluate([get_next1, get_next2]) self.assertEqual(val1, val2) @test_util.run_deprecated_v1 def testOnlySource(self): ds = dataset_ops.Dataset.range(10) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testSimplePipeline(self): ds = dataset_ops.Dataset.range(10).map(math_ops.square) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testConcat(self): ds1 = dataset_ops.Dataset.range(10) ds2 = dataset_ops.Dataset.range(10) ds = ds1.concatenate(ds2) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testZip(self): ds1 = dataset_ops.Dataset.range(10) ds2 = dataset_ops.Dataset.range(10) ds = dataset_ops.Dataset.zip((ds1, ds2)) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) @test_util.run_deprecated_v1 def testMultipleVariantTensors(self): ds = dataset_ops.Dataset.range(10) ds = _TestDataset(ds) cloned_ds = input_ops._clone_dataset(ds) self._assert_datasets_equal(ds, cloned_ds) if __name__ == "__main__": test.main()
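# For reference, a standalone sketch (plain Python, no TensorFlow required) of
# the file-level sharding pattern the tests above verify: with `num_shards`
# shards, shard `shard_index` reads files shard_index, shard_index +
# num_shards, shard_index + 2 * num_shards, and so on.
_num_files, _num_shards, _shard_index = 10, 2, 0
assert list(range(_shard_index, _num_files, _num_shards)) == [0, 2, 4, 6, 8]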
tensorflow-master
tensorflow/python/distribute/input_ops_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Strategy and optimizer combinations for combinations.combine().""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import tf2 from tensorflow.python.distribute import central_storage_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy as mirrored_lib from tensorflow.python.distribute import one_device_strategy as one_device_lib from tensorflow.python.distribute import tpu_strategy as tpu_lib from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver from tensorflow.python.eager import context from tensorflow.python.framework import config from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2 from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2 from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2 from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2 from tensorflow.python.tpu import device_assignment as device_assignment_lib from tensorflow.python.tpu import tpu_strategy_util from tensorflow.python.training import adagrad from tensorflow.python.training import adam from tensorflow.python.training import gradient_descent from tensorflow.python.training import rmsprop # pylint: disable=missing-docstring def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs): def _create_tpu_strategy(): resolver = tpu_cluster_resolver.TPUClusterResolver("") topology = tpu_strategy_util.initialize_tpu_system(resolver) device_assignment = None if use_single_core: device_assignment = device_assignment_lib.DeviceAssignment( topology, core_assignment=device_assignment_lib. 
SINGLE_CORE_ASSIGNMENT) # Steps per run is only supported in TF 1.x if tf2.enabled(): return tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs) else: return tpu_lib.TPUStrategyV1(resolver, steps_per_run, device_assignment, **kwargs) return _create_tpu_strategy # pylint: disable=g-long-lambda default_strategy = combinations.NamedDistribution( "Default", distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access required_gpus=None) one_device_strategy = combinations.NamedDistribution( "OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"), required_gpus=None) one_device_strategy_gpu = combinations.NamedDistribution( "OneDeviceGPU", lambda: one_device_lib.OneDeviceStrategy("/gpu:0"), required_gpus=1) one_device_strategy_on_worker_1 = combinations.NamedDistribution( "OneDeviceOnWorker1CPU", lambda: one_device_lib.OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"), # pylint: disable=line-too-long required_gpus=None) one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution( "OneDeviceOnWorker1GPU", lambda: one_device_lib.OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"), # pylint: disable=line-too-long required_gpus=1) tpu_strategy = combinations.NamedDistribution( "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True) tpu_strategy_one_step = combinations.NamedDistribution( "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True) tpu_strategy_one_core = combinations.NamedDistribution( "TPUOneCore", _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True), required_tpu=True) tpu_strategy_one_step_one_core = combinations.NamedDistribution( "TPUOneStepOneCore", _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True), required_tpu=True) mirrored_strategy_with_one_cpu = combinations.NamedDistribution( "Mirrored1CPU", lambda: mirrored_lib.MirroredStrategy(["/cpu:0"])) mirrored_strategy_with_one_gpu = combinations.NamedDistribution( "Mirrored1GPU", lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]), required_gpus=1) mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution( "MirroredCPUAndGPU", lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]), required_gpus=1) mirrored_strategy_with_two_gpus = combinations.NamedDistribution( "Mirrored2GPUs", lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]), required_gpus=2) # Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods. 
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution( "Mirrored2CPU", lambda: mirrored_lib.MirroredStrategy(["/cpu:1", "/cpu:2"])) central_storage_strategy_with_two_gpus = combinations.NamedDistribution( "CentralStorage2GPUs", lambda: central_storage_strategy.CentralStorageStrategy._from_num_gpus(2), # pylint: disable=protected-access required_gpus=2) central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution( "CentralStorageCPUAndGPU", lambda: central_storage_strategy.CentralStorageStrategy( ["/gpu:0", "/cpu:0"]), required_gpus=1) gradient_descent_optimizer_v1_fn = combinations.NamedObject( "GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2)) adagrad_optimizer_v1_fn = combinations.NamedObject( "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001)) adam_optimizer_v1_fn = combinations.NamedObject( "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1)) rmsprop_optimizer_v1_fn = combinations.NamedObject( "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001)) # TODO(shiningsun): consider adding the other v1 optimizers optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn] gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject( "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2)) adagrad_optimizer_keras_v2_fn = combinations.NamedObject( "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001)) adam_optimizer_keras_v2_fn = combinations.NamedObject( "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0)) rmsprop_optimizer_keras_v2_fn = combinations.NamedObject( "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001)) # TODO(shiningsun): consider adding the other v2 optimizers optimizers_v2 = [ gradient_descent_optimizer_keras_v2_fn, adagrad_optimizer_keras_v2_fn ] optimizers_v1_and_v2 = optimizers_v1 + optimizers_v2 graph_and_eager_modes = ["graph", "eager"] # This function should be called in a test's `setUp` method with the # maximum value needed in any test. 
def set_virtual_cpus_to_at_least(num_virtual_cpus):
  """Create virtual CPU devices if they haven't yet been created."""
  if num_virtual_cpus < 1:
    raise ValueError("`num_virtual_cpus` must be at least 1, not %r" %
                     (num_virtual_cpus,))
  physical_devices = config.list_physical_devices("CPU")
  if not physical_devices:
    raise RuntimeError("No CPUs found")
  configs = config.get_virtual_device_configuration(physical_devices[0])
  if configs is None:
    virtual_devices = [context.VirtualDeviceConfiguration()
                       for _ in range(num_virtual_cpus)]
    config.set_virtual_device_configuration(
        physical_devices[0], virtual_devices)
  else:
    if len(configs) < num_virtual_cpus:
      raise RuntimeError("Already configured with %d < %d virtual CPUs" %
                         (len(configs), num_virtual_cpus))


def distributions_and_v1_optimizers():
  """A common set of combinations with DistributionStrategies and Optimizers."""
  return combinations.combine(
      distribution=[
          one_device_strategy,
          mirrored_strategy_with_gpu_and_cpu,
          mirrored_strategy_with_two_gpus,
      ],
      optimizer_fn=optimizers_v1)


def distributions_and_v2_optimizers():
  """A common set of combinations with DistributionStrategies and Optimizers."""
  return combinations.combine(
      distribution=[
          one_device_strategy,
          mirrored_strategy_with_gpu_and_cpu,
          mirrored_strategy_with_two_gpus,
      ],
      optimizer_fn=optimizers_v2)


def distributions_and_v1_and_v2_optimizers():
  """A common set of combinations with DistributionStrategies and Optimizers."""
  return combinations.combine(
      distribution=[
          one_device_strategy,
          mirrored_strategy_with_gpu_and_cpu,
          mirrored_strategy_with_two_gpus,
      ],
      optimizer_fn=optimizers_v1_and_v2)


strategies_minus_tpu = [
    default_strategy,
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
]

tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
]


def strategy_minus_tpu_combinations():
  return combinations.combine(
      distribution=strategies_minus_tpu, mode=["graph", "eager"])


def tpu_strategy_combinations():
  return combinations.combine(distribution=tpu_strategies, mode=["graph"])


def all_strategy_combinations():
  return strategy_minus_tpu_combinations() + tpu_strategy_combinations()


def all_strategy_minus_default_and_tpu_combinations():
  return combinations.combine(
      distribution=[
          one_device_strategy,
          one_device_strategy_gpu,
          mirrored_strategy_with_gpu_and_cpu,
          mirrored_strategy_with_two_gpus,
      ],
      mode=["graph", "eager"])


def all_strategy_combinations_minus_default():
  return (all_strategy_minus_default_and_tpu_combinations() +
          tpu_strategy_combinations())
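# A usage sketch (illustrative only; `_ExampleCombinationsTest` and its body
# are hypothetical) showing how the named strategies above are typically
# consumed, following the pattern used by the test files in this package.
from absl.testing import parameterized

from tensorflow.python.eager import test


class _ExampleCombinationsTest(test.TestCase, parameterized.TestCase):

  @combinations.generate(
      combinations.combine(
          distribution=strategies_minus_tpu, mode=["graph", "eager"]))
  def testUnderStrategy(self, distribution):
    with distribution.scope():
      pass  # Strategy-specific assertions would go here.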
tensorflow-master
tensorflow/python/distribute/strategy_combinations.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library for running a computation across multiple devices. See the guide for overview and examples: [TensorFlow v1.x](https://www.tensorflow.org/guide/distribute_strategy), [TensorFlow v2.x](https://www.tensorflow.org/alpha/guide/distribute_strategy). The intent of this library is that you can write an algorithm in a stylized way and it will be usable with a variety of different `tf.distribute.Strategy` implementations. Each descendant will implement a different strategy for distributing the algorithm across multiple devices/machines. Furthermore, these changes can be hidden inside the specific layers and other library classes that need special treatment to run in a distributed setting, so that most users' model definition code can run unchanged. The `tf.distribute.Strategy` API works the same way with eager and graph execution. *Glossary* * _Data parallelism_ is where we run multiple copies of the model on different slices of the input data. This is in contrast to _model parallelism_ where we divide up a single copy of a model across multiple devices. Note: we only support data parallelism for now, but hope to add support for model parallelism in the future. * A _device_ is a CPU or accelerator (e.g. GPUs, TPUs) on some machine that TensorFlow can run operations on (see e.g. `tf.device`). You may have multiple devices on a single machine, or be connected to devices on multiple machines. Devices used to run computations are called _worker devices_. Devices used to store variables are _parameter devices_. For some strategies, such as `tf.distribute.MirroredStrategy`, the worker and parameter devices will be the same (see mirrored variables below). For others they will be different. For example, `tf.distribute.experimental.CentralStorageStrategy` puts the variables on a single device (which may be a worker device or may be the CPU), and `tf.distribute.experimental.ParameterServerStrategy` puts the variables on separate machines called parameter servers (see below). * A _replica_ is one copy of the model, running on one slice of the input data. Right now each replica is executed on its own worker device, but once we add support for model parallelism a replica may span multiple worker devices. * A _host_ is the CPU device on a machine with worker devices, typically used for running input pipelines. * A _worker_ is defined to be the physical machine(s) containing the physical devices (e.g. GPUs, TPUs) on which the replicated computation is executed. A worker may contain one or more replicas, but contains at least one replica. Typically one worker will correspond to one machine, but in the case of very large models with model parallelism, one worker may span multiple machines. We typically run one input pipeline per worker, feeding all the replicas on that worker. 
* _Synchronous_, or more commonly _sync_, training is where the updates from
  each replica are aggregated together before updating the model variables.
  This is in contrast to _asynchronous_, or _async_ training, where each
  replica updates the model variables independently. You may also have
  replicas partitioned into groups which are in sync within each group but
  async between groups.
* _Parameter servers_: These are machines that hold a single copy of
  parameters/variables, used by some strategies (right now just
  `tf.distribute.experimental.ParameterServerStrategy`). All replicas that
  want to operate on a variable retrieve it at the beginning of a step and
  send an update to be applied at the end of the step. These can in principle
  support either sync or async training, but right now we only have support
  for async training with parameter servers. Compare to
  `tf.distribute.experimental.CentralStorageStrategy`, which puts all
  variables on a single device on the same machine (and does sync training),
  and `tf.distribute.MirroredStrategy`, which mirrors variables to multiple
  devices (see below).
* _Mirrored variables_: These are variables that are copied to multiple
  devices, where we keep the copies in sync by applying the same updates to
  every copy. These would normally only be used with sync training.
* Reductions and all-reduce: A _reduction_ is some method of aggregating
  multiple values into one value, like "sum" or "mean". If a strategy is
  doing sync training, we will perform a reduction on the gradients to a
  parameter from all replicas before applying the update. _All-reduce_ is
  an algorithm for performing a reduction on values from multiple devices
  and making the result available on all of those devices.

Note that we provide a default version of `tf.distribute.Strategy` that is
used when no other strategy is in scope, that provides the same API with
reasonable default behavior.
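A minimal usage sketch (the device names are illustrative, and
`tf.distribute.MirroredStrategy` is just one of the strategy implementations
described above):

  strategy = tf.distribute.MirroredStrategy(["/gpu:0", "/gpu:1"])
  with strategy.scope():
    v = tf.Variable(1.0)  # A mirrored variable, kept in sync on both devices.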
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import enum # pylint: disable=g-bad-import-order import threading import weakref import six from tensorflow.python.autograph.core import ag_ctx from tensorflow.python.autograph.impl import api as autograph from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import numpy_dataset from tensorflow.python.distribute import reduce_util from tensorflow.python.eager import context as eager_context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops.losses import loss_reduction from tensorflow.python.ops.losses import losses_impl from tensorflow.python.platform import tf_logging from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export from tensorflow.tools.docs import doc_controls # ------------------------------------------------------------------------------ # Context tracking whether in a strategy.update() or .update_non_slot() call. _update_device = threading.local() def get_update_device(): """Get the current device if in a `tf.distribute.Strategy.update()` call.""" try: return _update_device.current except AttributeError: return None class UpdateContext(object): """Context manager when you are in `update()` or `update_non_slot()`.""" def __init__(self, device): self._device = device self._old_device = None def __enter__(self): self._old_device = get_update_device() _update_device.current = self._device def __exit__(self, exception_type, exception_value, traceback): del exception_type, exception_value, traceback _update_device.current = self._old_device # ------------------------------------------------------------------------------ # Public utility functions. @tf_export(v1=["distribute.get_loss_reduction"]) def get_loss_reduction(): """`tf.distribute.ReduceOp` corresponding to the last loss reduction. This is used to decide whether loss should be scaled in optimizer (used only for estimator + v1 optimizer use case). Returns: `tf.distribute.ReduceOp` corresponding to the last loss reduction for estimator and v1 optimizer use case. `tf.distribute.ReduceOp.SUM` otherwise. """ if not distribution_strategy_context.get_strategy()._scale_loss_for_estimator: # pylint: disable=protected-access # If we are not in Estimator context then return 'SUM'. We do not need to # scale loss in the optimizer. 
return reduce_util.ReduceOp.SUM last_reduction = ops.get_default_graph()._last_loss_reduction # pylint: disable=protected-access if (last_reduction == losses_impl.Reduction.SUM or last_reduction == loss_reduction.ReductionV2.SUM): return reduce_util.ReduceOp.SUM return reduce_util.ReduceOp.MEAN # ------------------------------------------------------------------------------ # Internal API for validating the current thread mode def _require_cross_replica_or_default_context_extended(extended): """Verify in cross-replica context.""" context = _get_per_thread_mode() cross_replica = context.cross_replica_context if cross_replica is not None and cross_replica.extended is extended: return if context is _get_default_replica_mode(): return strategy = extended._container_strategy() # pylint: disable=protected-access # We have an error to report, figure out the right message. if context.strategy is not strategy: _wrong_strategy_scope(strategy, context) assert cross_replica is None raise RuntimeError("Method requires being in cross-replica context, use " "get_replica_context().merge_call()") def _wrong_strategy_scope(strategy, context): # Figure out the right error message. if not distribution_strategy_context.has_strategy(): raise RuntimeError( 'Need to be inside "with strategy.scope()" for %s' % (strategy,)) else: raise RuntimeError( "Mixing different tf.distribute.Strategy objects: %s is not %s" % (context.strategy, strategy)) def require_replica_context(replica_ctx): """Verify in `replica_ctx` replica context.""" context = _get_per_thread_mode() if context.replica_context is replica_ctx: return # We have an error to report, figure out the right message. if context.replica_context is None: raise RuntimeError("Need to be inside `call_for_each_replica()`") if context.strategy is replica_ctx.strategy: # Two different ReplicaContexts with the same tf.distribute.Strategy. raise RuntimeError("Mismatching ReplicaContext.") raise RuntimeError( "Mismatching tf.distribute.Strategy objects: %s is not %s." % (context.strategy, replica_ctx.strategy)) def _require_strategy_scope_strategy(strategy): """Verify in a `strategy.scope()` in this thread.""" context = _get_per_thread_mode() if context.strategy is strategy: return _wrong_strategy_scope(strategy, context) def _require_strategy_scope_extended(extended): """Verify in a `distribution_strategy.scope()` in this thread.""" context = _get_per_thread_mode() if context.strategy.extended is extended: return # Report error. strategy = extended._container_strategy() # pylint: disable=protected-access _wrong_strategy_scope(strategy, context) # ------------------------------------------------------------------------------ # Internal context managers used to implement the DistributionStrategy # base class class _CurrentDistributionContext(object): """Context manager setting the current `tf.distribute.Strategy`. Also: overrides the variable creator and optionally the current device. """ def __init__(self, strategy, var_creator_scope, var_scope=None, default_device=None): self._context = distribution_strategy_context._CrossReplicaThreadMode( # pylint: disable=protected-access strategy) self._var_creator_scope = var_creator_scope self._var_scope = var_scope if default_device: self._device_scope = ops.device(default_device) else: self._device_scope = None self._same_scope_again_count = 0 def __enter__(self): # Allow this scope to be entered if this strategy is already in scope. 
    if distribution_strategy_context.has_strategy():
      _require_cross_replica_or_default_context_extended(
          self._context.strategy.extended)
      self._same_scope_again_count += 1
    else:
      _push_per_thread_mode(self._context)
      if self._var_scope:
        self._var_scope.__enter__()
      self._var_creator_scope.__enter__()
      if self._device_scope:
        self._device_scope.__enter__()
    return self._context.strategy

  def __exit__(self, exception_type, exception_value, traceback):
    if self._same_scope_again_count > 0:
      self._same_scope_again_count -= 1
      return
    if self._device_scope:
      try:
        self._device_scope.__exit__(exception_type, exception_value, traceback)
      except RuntimeError as e:
        six.raise_from(
            RuntimeError("Device scope nesting error: move call to "
                         "tf.distribute.set_strategy() out of `with` scope."),
            e)

    try:
      self._var_creator_scope.__exit__(
          exception_type, exception_value, traceback)
    except RuntimeError as e:
      six.raise_from(
          RuntimeError("Variable creator scope nesting error: move call to "
                       "tf.distribute.set_strategy() out of `with` scope."),
          e)

    if self._var_scope:
      try:
        self._var_scope.__exit__(exception_type, exception_value, traceback)
      except RuntimeError as e:
        six.raise_from(
            RuntimeError("Variable scope nesting error: move call to "
                         "tf.distribute.set_strategy() out of `with` scope."),
            e)
    _pop_per_thread_mode()


# TODO(yuefengz): add more replication modes.
@tf_export("distribute.InputReplicationMode")
class InputReplicationMode(enum.Enum):
  """Replication mode for input function.

  * `PER_WORKER`: The input function will be called on each worker
    independently, creating as many input pipelines as there are workers.
    Replicas will dequeue from the local Dataset on their worker.
    `tf.distribute.Strategy` doesn't manage any state sharing between such
    separate input pipelines.
  """
  PER_WORKER = "PER_WORKER"


@tf_export("distribute.InputContext")
class InputContext(object):
  """A class wrapping information needed by an input function.

  This is a context class that is passed to the user's input function and
  contains information about the compute replicas and input pipelines. The
  number of compute replicas (in sync training) helps compute the
  per-input-pipeline batch size from the desired global batch size. Input
  pipeline information can be used to return a different subset of the input
  in each input pipeline (e.g. to shard the input pipeline, use a different
  input source, etc.).
  """

  def __init__(self,
               num_input_pipelines=1,
               input_pipeline_id=0,
               num_replicas_in_sync=1):
    """Initializes an InputContext object.

    Args:
      num_input_pipelines: the number of input pipelines in a cluster.
      input_pipeline_id: the current input pipeline id, should be an int in
        [0,`num_input_pipelines`).
      num_replicas_in_sync: the number of replicas that are in sync.
    """
    self._num_input_pipelines = num_input_pipelines
    self._input_pipeline_id = input_pipeline_id
    self._num_replicas_in_sync = num_replicas_in_sync

  @property
  def num_replicas_in_sync(self):
    """Returns the number of compute replicas in sync."""
    return self._num_replicas_in_sync

  @property
  def input_pipeline_id(self):
    """Returns the input pipeline ID."""
    return self._input_pipeline_id

  @property
  def num_input_pipelines(self):
    """Returns the number of input pipelines."""
    return self._num_input_pipelines

  def get_per_replica_batch_size(self, global_batch_size):
    """Returns the per-replica batch size.

    Args:
      global_batch_size: the global batch size which should be divisible by
        `num_replicas_in_sync`.

    Returns:
      the per-replica batch size.

    Raises:
      ValueError: if `global_batch_size` is not divisible by
        `num_replicas_in_sync`.
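    For example (a short sketch; the constructor arguments below are
    illustrative), an input context with 4 replicas in sync maps a global
    batch size of 64 to a per-replica batch size of 16:

    ```python
    ctx = tf.distribute.InputContext(num_replicas_in_sync=4)
    assert ctx.get_per_replica_batch_size(64) == 16
    ```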
""" if global_batch_size % self._num_replicas_in_sync != 0: raise ValueError("The `global_batch_size` %r is not divisible by " "`num_replicas_in_sync` %r " % (global_batch_size, self._num_replicas_in_sync)) return global_batch_size // self._num_replicas_in_sync # ------------------------------------------------------------------------------ # Base classes for all distribution strategies. # pylint: disable=line-too-long @tf_export("distribute.Strategy", v1=[]) class Strategy(object): """A list of devices with a state & compute distribution policy. See [the guide](https://www.tensorflow.org/alpha/guide/distribute_strategy) for overview and examples. In short: * You may pass descendant of `tf.distribute.Strategy` to `tf.estimator.RunConfig` to specify how a `tf.estimator.Estimator` should distribute its computation. See [guide](https://www.tensorflow.org/alpha/guide/distribute_strategy#using_tfdistributestrategy_with_estimator). * Otherwise, use `tf.distribute.Strategy.scope` to specify that a strategy should be used when building an executing your model. (This puts you in the "cross-replica context" for this strategy, which means the strategy is put in control of things like variable placement.) * If using Keras `compile`/`fit`, [that is it](https://www.tensorflow.org/alpha/guide/distribute_strategy#using_tfdistributestrategy_with_keras). * If you are writing a custom training loop, you will need to call a few more methods, [see the guide](https://www.tensorflow.org/alpha/guide/distribute_strategy#using_tfdistributestrategy_with_custom_training_loops): * Start by either creating a `tf.data.Dataset` normally or using `tf.distribute.experimental_make_numpy_dataset` to make a dataset out of a `numpy` array. * Use `tf.distribute.Strategy.experimental_distribute_dataset` to convert a `tf.data.Dataset` to something that produces "per-replica" values. If you want to manually specify how the dataset should be partitioned across replicas, use `tf.distribute.Strategy.experimental_distribute_datasets_from_function` instead. * Use `tf.distribute.Strategy.experimental_run_v2` to run a function once per replica, taking values that may be "per-replica" (e.g. from a distributed dataset) and returning "per-replica" values. This function is executed in "replica context", which means each operation is performed separately on each replica. * Finally use a method (such as `tf.distribute.Strategy.reduce`) to convert the resulting "per-replica" values into ordinary `Tensor`s. A custom training loop can be as simple as: ``` with my_strategy.scope(): @tf.function def distribute_train_epoch(dataset): def replica_fn(input): # process input and return result return result total_result = 0 for x in dataset: per_replica_result = my_strategy.experimental_run_v2(replica_fn, args=(x,)) total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_result, axis=None) return total_result dist_dataset = my_strategy.experimental_distribute_dataset(dataset) for _ in range(EPOCHS): train_result = distribute_train_epoch(dist_dataset) ``` This takes an ordinary `dataset` and `replica_fn` and runs it distributed using a particular `tf.distribute.Strategy` named `my_strategy` above. Any variables created in `replica_fn` are created using `my_strategy`'s policy, and library functions called by `replica_fn` can use the `get_replica_context()` API to implement distributed-specific behavior. 
You can use the `reduce` API to aggregate results across replicas and use this as a return value from one iteration over the distributed dataset. Or you can use `tf.keras.metrics` (such as loss, accuracy, etc.) to accumulate metrics across steps in a given epoch. See the [custom training loop tutorial](https://www.tensorflow.org/alpha/tutorials/distribute/training_loops) for a more detailed example. Note: `tf.distribute.Strategy` currently does not support TensorFlow's partitioned variables (where a single variable is split across multiple devices) at this time. """ # pylint: enable=line-too-long # TODO(josh11b): Partitioned computations, state; sharding # TODO(josh11b): Model parallelism: "replicas" with multiple devices; shuffling def __init__(self, extended): self._extended = extended # Flag that is used to indicate whether distribution strategy is used with # Estimator. This is required for backward compatibility of loss scaling # when using v1 optimizer with estimator. self._scale_loss_for_estimator = False if not hasattr(extended, "_retrace_functions_for_each_device"): # pylint: disable=protected-access try: extended._retrace_functions_for_each_device = ( len(extended.worker_devices) > 1) except: # pylint: disable=bare-except # Default for the case where extended.worker_devices can't return # a sensible value. extended._retrace_functions_for_each_device = True # pylint: enable=protected-access @property def extended(self): """`tf.distribute.StrategyExtended` with additional methods.""" return self._extended @tf_contextlib.contextmanager def _scale_loss_for_estimator_enabled(self): """Scope which sets a flag used for scaling losses in optimizer. Yields: `_scale_loss_for_estimator_enabled` is a context manager with a side effect, but doesn't return a value. """ self._scale_loss_for_estimator = True try: yield finally: self._scale_loss_for_estimator = False def scope(self): """Returns a context manager selecting this Strategy as current. Inside a `with strategy.scope():` code block, this thread will use a variable creator set by `strategy`, and will enter its "cross-replica context". Returns: A context manager. """ return self._extended._scope(self) # pylint: disable=protected-access @doc_controls.do_not_doc_inheritable # DEPRECATED, moving to `extended` def colocate_vars_with(self, colocate_with_variable): """DEPRECATED: use extended.colocate_vars_with() instead.""" return self._extended.colocate_vars_with(colocate_with_variable) @doc_controls.do_not_generate_docs # DEPRECATED: TF 1.x only def make_dataset_iterator(self, dataset): """DEPRECATED TF 1.x ONLY.""" return self._extended._make_dataset_iterator(dataset) # pylint: disable=protected-access @doc_controls.do_not_generate_docs # DEPRECATED: TF 1.x only def make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER): """DEPRECATED TF 1.x ONLY.""" if replication_mode != InputReplicationMode.PER_WORKER: raise ValueError( "Input replication mode not supported: %r" % replication_mode) with self.scope(): return self.extended._make_input_fn_iterator( # pylint: disable=protected-access input_fn, replication_mode=replication_mode) def experimental_make_numpy_dataset(self, numpy_input): """Makes a dataset for input provided via a numpy array. This avoids adding `numpy_input` as a large constant in the graph, and copies the data to the machine or machines that will be processing the input. Args: numpy_input: A nest of NumPy input arrays that will be distributed evenly across all replicas. 
        Note that lists of NumPy arrays are stacked, as that is normal
        `tf.data.Dataset` behavior.

    Returns:
      A `tf.data.Dataset` representing `numpy_input`.
    """
    return self.extended.experimental_make_numpy_dataset(
        numpy_input, session=None)

  @doc_controls.do_not_generate_docs  # DEPRECATED: TF 1.x only
  def experimental_run(self, fn, input_iterator=None):
    """DEPRECATED TF 1.x ONLY."""
    with self.scope():
      args = (input_iterator.get_next(),) if input_iterator is not None else ()
    return self.experimental_run_v2(fn, args=args)

  def experimental_distribute_dataset(self, dataset):
    """Distributes a tf.data.Dataset instance provided via `dataset`.

    In a multi-worker setting, we will first attempt to distribute the dataset
    by detecting whether it is being created out of reader datasets (e.g.
    `TFRecordDataset`, `TextLineDataset`, etc.) and, if so, sharding the input
    files. Note that there has to be at least one input file per worker. If
    you have fewer input files than workers, we suggest disabling distribution
    of your dataset using the `auto_shard` option described below.

    If that attempt is unsuccessful (e.g. the dataset is created from a
    `Dataset.range`), we will shard the dataset evenly at the end by appending
    a `.shard` operation to the end of the processing pipeline. This will
    cause the entire preprocessing pipeline for all the data to be run on
    every worker, and each worker will do redundant work. We will print a
    warning if this method of sharding is selected. In this case, consider
    using `experimental_distribute_datasets_from_function` instead.

    You can disable dataset distribution using the `auto_shard` option in
    `tf.data.experimental.DistributeOptions`.

    Within each host, we will also split the data among all the worker devices
    (if more than one is present), and this will happen even if multi-worker
    sharding is disabled using the method above.

    The following is an example:

    ```python
    strategy = tf.distribute.MirroredStrategy()

    # Create a dataset
    dataset = tf.data.TFRecordDataset(
        ["/a/1.tfr", "/a/2.tfr", "/a/3.tfr", "/a/4.tfr"])

    # Distribute that dataset
    dist_dataset = strategy.experimental_distribute_dataset(dataset)
    # Iterate over the distributed dataset
    for x in dist_dataset:
      # process dataset elements
      strategy.experimental_run_v2(train_step, args=(x,))
    ```

    Args:
      dataset: `tf.data.Dataset` that will be sharded across all replicas using
        the rules stated above.

    Returns:
      A "distributed `Dataset`", which acts like a `tf.data.Dataset` except
      it produces "per-replica" values.
    """
    return self._extended._experimental_distribute_dataset(dataset)  # pylint: disable=protected-access

  def experimental_distribute_datasets_from_function(self, dataset_fn):
    """Distributes `tf.data.Dataset` instances created by calls to `dataset_fn`.

    `dataset_fn` will be called once for each worker in the strategy. Each
    replica on that worker will dequeue one batch of inputs from the local
    `Dataset` (i.e. if a worker has two replicas, two batches will be dequeued
    from the `Dataset` every step).

    This method can be used for several purposes. For example, where
    `experimental_distribute_dataset` is unable to shard the input files, this
    method might be used to manually shard the dataset (avoiding the slow
    fallback behavior in `experimental_distribute_dataset`). In cases where the
    dataset is infinite, this sharding can be done by creating dataset replicas
    that differ only in their random seed.
The `dataset_fn` should take an `tf.distribute.InputContext` instance where information about batching and input replication can be accessed: ``` def dataset_fn(input_context): batch_size = input_context.get_per_replica_batch_size(global_batch_size) d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size) return d.shard( input_context.num_input_pipelines, input_context.input_pipeline_id) inputs = strategy.experimental_distribute_datasets_from_function(dataset_fn) for batch in inputs: replica_results = strategy.experimental_run_v2(replica_fn, args=(batch,)) ``` IMPORTANT: The `tf.data.Dataset` returned by `dataset_fn` should have a per-replica batch size, unlike `experimental_distribute_dataset`, which uses the global batch size. This may be computed using `input_context.get_per_replica_batch_size`. Args: dataset_fn: A function taking a `tf.distribute.InputContext` instance and returning a `tf.data.Dataset`. Returns: A "distributed `Dataset`", which acts like a `tf.data.Dataset` except it produces "per-replica" values. """ return self._extended._experimental_distribute_datasets_from_function( # pylint: disable=protected-access dataset_fn) def experimental_run_v2(self, fn, args=(), kwargs=None): """Runs ops in `fn` on each replica, with the given arguments. Executes ops specified by `fn` on each replica. If `args` or `kwargs` have "per-replica" values, such as those produced by a "distributed `Dataset`", when `fn` is executed on a particular replica, it will be executed with the component of those "per-replica" values that corresponds to that replica. `fn` may call `tf.distribute.get_replica_context()` to access members such as `all_reduce`. IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being used, and whether eager execution is enabled, `fn` may be called one or more times (once for each replica). Args: fn: The function to run. The output must be a `tf.nest` of `Tensor`s. args: (Optional) Positional arguments to `fn`. kwargs: (Optional) Keyword arguments to `fn`. Returns: Merged return value of `fn` across replicas. The structure of the return value is the same as the return value from `fn`. Each element in the structure can either be "per-replica" `Tensor` objects or `Tensor`s (for example, if running on a single replica). """ with self.scope(): fn = autograph.tf_convert(fn, ag_ctx.control_status_ctx()) return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs) def reduce(self, reduce_op, value, axis): """Reduce `value` across replicas. Given a per-replica value returned by `experimental_run_v2`, say a per-example loss, the batch will be divided across all the replicas. This function allows you to aggregate across replicas and optionally also across batch elements. For example, if you have a global batch size of 8 and 2 replicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and `[4, 5, 6, 7]` will be on replica 1. By default, `reduce` will just aggregate across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful when each replica is computing a scalar or some other value that doesn't have a "batch" dimension (like a gradient). More often you will want to aggregate across the global batch, which you can get by specifying the batch dimension as the `axis`, typically `axis=0`. In this case it would return a scalar `0+1+2+3+4+5+6+7`. If there is a last partial batch, you will need to specify an axis so that the resulting shape is consistent across replicas. 
So if the last batch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you would get a shape mismatch unless you specify `axis=0`. If you specify `tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct denominator of 6. Contrast this with computing `reduce_mean` to get a scalar value on each replica and this function to average those means, which will weigh some values `1/8` and others `1/4`. Args: reduce_op: A `tf.distribute.ReduceOp` value specifying how values should be combined. value: A "per replica" value, e.g. returned by `experimental_run_v2` to be combined into a single tensor. axis: Specifies the dimension to reduce along within each replica's tensor. Should typically be set to the batch dimension, or `None` to only reduce across replicas (e.g. if the tensor has no batch dimension). Returns: A `Tensor`. """ # TODO(josh11b): support `value` being a nest. _require_cross_replica_or_default_context_extended(self._extended) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) if axis is None: return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access if reduce_op == reduce_util.ReduceOp.SUM: value = self.experimental_run_v2( lambda v: math_ops.reduce_sum(v, axis=axis), args=(value,)) return self._extended._reduce(reduce_op, value) # pylint: disable=protected-access if reduce_op != reduce_util.ReduceOp.MEAN: raise TypeError("Expected `reduce_op` to be a `tf.distribute.ReduceOp`, " "not: %r" % reduce_op) # TODO(josh11b): Support list/tuple and tensor axis values. if not isinstance(axis, six.integer_types): raise TypeError("Expected `axis` to be an integer not: %r" % axis) def mean_reduce_helper(v, axis=axis): """Computes the numerator and denominator on each replica.""" numer = math_ops.reduce_sum(v, axis=axis) if v.shape.rank is not None: # Note(joshl): We support axis < 0 to be consistent with the # tf.math.reduce_* operations. if axis < 0: if axis + v.shape.rank < 0: raise ValueError( "`axis` = %r out of range for `value` with rank %d" % (axis, v.shape.rank)) axis += v.shape.rank elif axis >= v.shape.rank: raise ValueError( "`axis` = %r out of range for `value` with rank %d" % (axis, v.shape.rank)) # TF v2 returns `None` for unknown dimensions and an integer for # known dimension, whereas TF v1 returns tensor_shape.Dimension(None) # or tensor_shape.Dimension(integer). `dimension_value` hides this # difference, always returning `None` or an integer. dim = tensor_shape.dimension_value(v.shape[axis]) if dim is not None: # By returning a python value in the static shape case, we can # maybe get a fast path for reducing the denominator. return numer, dim elif axis < 0: axis = axis + array_ops.rank(v) denom = array_ops.shape_v2(v, out_type=dtypes.int64)[axis] # TODO(josh11b): Should we cast denom to v.dtype here instead of after the # reduce is complete? return numer, denom numer, denom = self.experimental_run_v2(mean_reduce_helper, args=(value,)) # TODO(josh11b): Should batch reduce here instead of doing two. numer = self._extended._reduce(reduce_util.ReduceOp.SUM, numer) # pylint: disable=protected-access denom = self._extended._reduce(reduce_util.ReduceOp.SUM, denom) # pylint: disable=protected-access denom = math_ops.cast(denom, numer.dtype) return math_ops.truediv(numer, denom) @doc_controls.do_not_doc_inheritable # DEPRECATED def unwrap(self, value): """Returns the list of all local per-replica values contained in `value`. DEPRECATED: Please use `experimental_local_results` instead. 
Note: This only returns values on the workers initiated by this client. When using a `tf.distribute.Strategy` like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by `experimental_run()`, `extended.call_for_each_replica()`, or a variable created in `scope`. Returns: A tuple of values contained in `value`. If `value` represents a single value, this returns `(value,).` """ return self._extended._local_results(value) # pylint: disable=protected-access def experimental_local_results(self, value): """Returns the list of all local per-replica values contained in `value`. Note: This only returns values on the workers initiated by this client. When using a `tf.distribute.Strategy` like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by `experimental_run()`, `experimental_run_v2()`, `extended.call_for_each_replica()`, or a variable created in `scope`. Returns: A tuple of values contained in `value`. If `value` represents a single value, this returns `(value,).` """ return self._extended._local_results(value) # pylint: disable=protected-access @doc_controls.do_not_doc_inheritable # DEPRECATED: TF v1.x only def group(self, value, name=None): """Shortcut for `tf.group(self.experimental_local_results(value))`.""" return self._extended._group(value, name) # pylint: disable=protected-access @property def num_replicas_in_sync(self): """Returns number of replicas over which gradients are aggregated.""" return self._extended._num_replicas_in_sync # pylint: disable=protected-access @doc_controls.do_not_doc_inheritable # DEPRECATED: see doc string def configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): # pylint: disable=g-doc-return-or-yield,g-doc-args """DEPRECATED: use `update_config_proto` instead. Configures the strategy class. DEPRECATED: This method's functionality has been split into the strategy constructor and `update_config_proto`. In the future, we will allow passing cluster and config_proto to the constructor to configure the strategy. And `update_config_proto` can be used to update the config_proto based on the specific strategy. """ return self._extended._configure( # pylint: disable=protected-access session_config, cluster_spec, task_type, task_id) @doc_controls.do_not_generate_docs # DEPRECATED def update_config_proto(self, config_proto): """DEPRECATED TF 1.x ONLY.""" return self._extended._update_config_proto(config_proto) # pylint: disable=protected-access def __deepcopy__(self, memo): # First do a regular deepcopy of `self`. cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): setattr(result, k, copy.deepcopy(v, memo)) # One little fix-up: we want `result._extended` to reference `result` # instead of `self`. result._extended._container_strategy_weakref = weakref.ref(result) # pylint: disable=protected-access return result def __copy__(self): raise RuntimeError("Must only deepcopy DistributionStrategy.") # TF v1.x version has additional deprecated APIs @tf_export(v1=["distribute.Strategy"]) class StrategyV1(Strategy): """A list of devices with a state & compute distribution policy. See [the guide](https://www.tensorflow.org/guide/distribute_strategy) for overview and examples. 
  Note: Not all `tf.distribute.Strategy` implementations currently support
  TensorFlow's partitioned variables (where a single variable is split across
  multiple devices).
  """

  def make_dataset_iterator(self, dataset):
    """Makes an iterator for input provided via `dataset`.

    DEPRECATED: This method is not available in TF 2.x.

    Data from the given dataset will be distributed evenly across all the
    compute replicas. We will assume that the input dataset is batched by the
    global batch size. With this assumption, we will make a best effort to
    divide each batch across all the replicas (one or more workers).
    If this effort fails, an error will be thrown, and the user should instead
    use `make_input_fn_iterator`, which provides more control to the user and
    does not try to divide a batch across replicas. The user could also use
    `make_input_fn_iterator` if they want to customize which input is fed to
    which replica/worker etc.

    Args:
      dataset: `tf.data.Dataset` that will be distributed evenly across all
        replicas.

    Returns:
      A `tf.distribute.InputIterator` which returns inputs for each step of
      the computation. The user should call `initialize` on the returned
      iterator.
    """
    return self._extended._make_dataset_iterator(dataset)  # pylint: disable=protected-access

  def make_input_fn_iterator(self,  # pylint: disable=useless-super-delegation
                             input_fn,
                             replication_mode=InputReplicationMode.PER_WORKER):
    """Returns an iterator split across replicas created from an input function.

    DEPRECATED: This method is not available in TF 2.x.

    The `input_fn` should take a `tf.distribute.InputContext` object where
    information about batching and input sharding can be accessed:

    ```
    def input_fn(input_context):
      batch_size = input_context.get_per_replica_batch_size(global_batch_size)
      d = tf.data.Dataset.from_tensors([[1.]]).repeat().batch(batch_size)
      return d.shard(input_context.num_input_pipelines,
                     input_context.input_pipeline_id)
    with strategy.scope():
      iterator = strategy.make_input_fn_iterator(input_fn)
      replica_results = strategy.experimental_run(replica_fn, iterator)
    ```

    The `tf.data.Dataset` returned by `input_fn` should have a per-replica
    batch size, which may be computed using
    `input_context.get_per_replica_batch_size`.

    Args:
      input_fn: A function taking a `tf.distribute.InputContext` object and
        returning a `tf.data.Dataset`.
      replication_mode: an enum value of `tf.distribute.InputReplicationMode`.
        Only `PER_WORKER` is supported currently, which means there will be
        a single call to `input_fn` per worker. Replicas will dequeue from the
        local `tf.data.Dataset` on their worker.

    Returns:
      An iterator object that should first be `.initialize()`-ed. It may then
      either be passed to `strategy.experimental_run()` or you can
      `iterator.get_next()` to get the next value to pass to
      `strategy.extended.call_for_each_replica()`.
    """
    return super(StrategyV1, self).make_input_fn_iterator(
        input_fn, replication_mode)

  def experimental_make_numpy_dataset(self, numpy_input, session=None):
    """Makes a dataset for input provided via a numpy array.

    This avoids adding `numpy_input` as a large constant in the graph,
    and copies the data to the machine or machines that will be processing
    the input.

    Args:
      numpy_input: A nest of NumPy input arrays that will be distributed evenly
        across all replicas. Note that lists of NumPy arrays are stacked, as
        that is normal `tf.data.Dataset` behavior.
      session: (TensorFlow v1.x graph execution only) A session used for
        initialization.

    Returns:
      A `tf.data.Dataset` representing `numpy_input`.
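    For example (a sketch, assuming graph mode, where `strategy` and `sess`
    are a previously created `tf.distribute.Strategy` and
    `tf.compat.v1.Session`):

    ```python
    import numpy as np

    features = np.random.rand(100, 10).astype(np.float32)
    dataset = strategy.experimental_make_numpy_dataset(features, session=sess)
    dataset = dataset.batch(10)
    ```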
""" return self.extended.experimental_make_numpy_dataset( numpy_input, session=session) def experimental_run(self, fn, input_iterator=None): # pylint: disable=useless-super-delegation """Runs ops in `fn` on each replica, with inputs from `input_iterator`. DEPRECATED: This method is not available in TF 2.x. Please switch to using `experimental_run_v2` instead. When eager execution is enabled, executes ops specified by `fn` on each replica. Otherwise, builds a graph to execute the ops on each replica. Each replica will take a single, different input from the inputs provided by one `get_next` call on the input iterator. `fn` may call `tf.distribute.get_replica_context()` to access members such as `replica_id_in_sync_group`. IMPORTANT: Depending on the `tf.distribute.Strategy` implementation being used, and whether eager execution is enabled, `fn` may be called one or more times (once for each replica). Args: fn: The function to run. The inputs to the function must match the outputs of `input_iterator.get_next()`. The output must be a `tf.nest` of `Tensor`s. input_iterator: (Optional) input iterator from which the inputs are taken. Returns: Merged return value of `fn` across replicas. The structure of the return value is the same as the return value from `fn`. Each element in the structure can either be `PerReplica` (if the values are unsynchronized), `Mirrored` (if the values are kept in sync), or `Tensor` (if running on a single replica). """ return super(StrategyV1, self).experimental_run( fn, input_iterator) def reduce(self, reduce_op, value, axis=None): return super(StrategyV1, self).reduce(reduce_op, value, axis) reduce.__doc__ = Strategy.reduce.__doc__ def update_config_proto(self, config_proto): """Returns a copy of `config_proto` modified for use with this strategy. DEPRECATED: This method is not available in TF 2.x. The updated config has something needed to run a strategy, e.g. configuration to run collective ops, or device filters to improve distributed training performance. Args: config_proto: a `tf.ConfigProto` object. Returns: The updated copy of the `config_proto`. """ return self._extended._update_config_proto(config_proto) # pylint: disable=protected-access # NOTE(josh11b): For any strategy that needs to support tf.compat.v1, # instead descend from StrategyExtendedV1. @tf_export("distribute.StrategyExtended", v1=[]) class StrategyExtendedV2(object): """Additional APIs for algorithms that need to be distribution-aware. Note: For most usage of `tf.distribute.Strategy`, there should be no need to call these methods, since TensorFlow libraries (such as optimizers) already call these methods when needed on your behalf. Lower-level concepts: * Wrapped values: In order to represent values parallel across devices (either replicas or the devices associated with a particular value), we wrap them in a "PerReplica" or "Mirrored" object that contains a map from replica id to values. "PerReplica" is used when the value may be different across replicas, and "Mirrored" when the value are the same. * Unwrapping and merging: Consider calling a function `fn` on multiple replicas, like `experimental_run_v2(fn, args=[w])` with an argument `w` that is a wrapped value. This means `w` will have a map taking replica id `0` to `w0`, replica id `11` to `w1`, etc. `experimental_run_v2()` unwraps `w` before calling `fn`, so it calls `fn(w0)` on `d0`, `fn(w1)` on `d1`, etc. It then merges the return values from `fn()`, which can possibly result in wrapped values. 
For example, let's say `fn()` returns a tuple with three components: `(x, a, v0)` from replica 0, `(x, b, v1)` on replica 1, etc. If the first component is the same object `x` from every replica, then the first component of the merged result will also be `x`. If the second component is different (`a`, `b`, ...) from each replica, then the merged value will have a wrapped map from replica device to the different values. If the third component is the members of a mirrored variable (`v` maps `d0` to `v0`, `d1` to `v1`, etc.), then the merged result will be that mirrored variable (`v`). * Worker devices vs. parameter devices: Most replica computations will happen on worker devices. Since we don't yet support model parallelism, there will be one worker device per replica. When using parameter servers or central storage, the set of devices holding variables may be different, otherwise the parameter devices might match the worker devices. *Replica context vs. Cross-replica context* _replica context_ is when we are in some function that is being called once for each replica. Otherwise we are in cross-replica context, which is useful for calling `tf.distribute.Strategy` methods which operate across the replicas (like `reduce_to()`). By default you start in a replica context (the "default single replica context") and then some methods can switch you back and forth. There is a third mode you can be in called _update context_ used when updating variables. * `tf.distribute.Strategy.scope`: enters cross-replica context when no other strategy is in scope. * `tf.distribute.Strategy.experimental_run_v2`: calls a function in replica context. * `tf.distribute.ReplicaContext.merge_call`: transitions from replica context to cross-replica context. * `tf.distribute.StrategyExtended.update`: calls a function in an update context from a cross-replica context. In a replica context, you may freely read the values of variables, but you may only update their value if they specify a way to aggregate the update using the `aggregation` parameter in the variable's constructor. In a cross-replica context, you may read or write variables (writes may need to be broadcast to all copies of the variable if it is mirrored). *Sync on read variables* In some cases, such as a metric, we want to accumulate a bunch of updates on each replica independently and only aggregate when reading. This can be a big performance win when the value is read only rarely (maybe the value is only read at the end of an epoch or when checkpointing). These are variables created by passing `synchronization=ON_READ` to the variable's constructor (and some value for `aggregation`). The strategy may choose to put the variable on multiple devices, like mirrored variables, but unlike mirrored variables we don't synchronize the updates to them to make sure they have the same value. Instead, the synchronization is performed when reading in cross-replica context. In a replica context, reads and writes are performed on the local copy (we allow reads so you can write code like `v = 0.9*v + 0.1*update`). We don't allow operations like `v.assign_add` in a cross-replica context for sync on read variables; right now we don't have a use case for such updates and depending on the aggregation mode such updates may not be sensible. *Locality* Depending on how a value is produced, it will have a type that will determine how it may be used. "Per-replica" values exist on the worker devices, with a different value for each replica. 
  They are produced by
  iterating through a "distributed `Dataset`" returned by
  `tf.distribute.Strategy.experimental_distribute_dataset` and
  `tf.distribute.Strategy.experimental_distribute_datasets_from_function`.
  They are also the typical result returned by
  `tf.distribute.Strategy.experimental_run_v2`. You typically can't use a
  per-replica value directly in a cross-replica context, without first
  resolving how to aggregate the values across replicas, for instance by
  using `tf.distribute.Strategy.reduce`.

  "Mirrored" values are like per-replica values, except we know that the
  value on all replicas is the same. We can safely read a mirrored value in a
  cross-replica context by using the value on any replica. You can convert
  a per-replica value into a mirrored value by using
  `tf.distribute.ReplicaContext.all_reduce`.

  Values can also have the same locality as a variable, which is a mirrored
  value but residing on the same devices as the variable (as opposed to the
  compute devices). Such values may be passed to a call to
  `tf.distribute.StrategyExtended.update` to update the value of a variable.
  You may use `tf.distribute.StrategyExtended.colocate_vars_with` to give a
  variable the same locality as another variable. This is useful, for example,
  for "slot" variables used by an optimizer for keeping track of statistics
  used to update a primary/model variable. You may convert a per-replica
  value to a variable's locality by using
  `tf.distribute.StrategyExtended.reduce_to` or
  `tf.distribute.StrategyExtended.batch_reduce_to`.

  In addition to slot variables which should be colocated with their primary
  variables, optimizers also define non-slot variables. These can be things
  like "number of step updates performed" or "beta1^t" and "beta2^t". Each
  strategy has some policy for which devices those variables should be copied
  to, called the "non-slot devices" (some subset of the parameter devices).
  We require that all non-slot variables are allocated on the same device, or
  mirrored across the same set of devices. You can use
  `tf.distribute.StrategyExtended.non_slot_devices` to pick a consistent set
  of devices to pass to both
  `tf.distribute.StrategyExtended.colocate_vars_with` and
  `tf.distribute.StrategyExtended.update_non_slot`.

  *How to update a variable*

  The standard pattern for updating variables is to:

  1. In your function passed to `tf.distribute.Strategy.experimental_run_v2`,
     compute a list of (update, variable) pairs. For example, the update
     might be the gradient of the loss with respect to the variable.
  2. Switch to cross-replica mode by calling
     `tf.distribute.get_replica_context().merge_call()` with the updates and
     variables as arguments.
  3. Call
     `tf.distribute.StrategyExtended.reduce_to(VariableAggregation.SUM, t, v)`
     (for one variable) or `tf.distribute.StrategyExtended.batch_reduce_to`
     (for a list of variables) to sum the updates and broadcast the result to
     the variable's devices.
  4. Call `tf.distribute.StrategyExtended.update(v)` for each variable to
     update its value.

  Steps 2 through 4 are done automatically by class
  `tf.keras.optimizers.Optimizer` if you call its
  `tf.keras.optimizers.Optimizer.apply_gradients` method in a replica context.
  They are also done automatically if you call an `assign*` method on a (non
  sync-on-read) variable that was constructed with an aggregation method
  (which is used to determine the reduction used in step 3).

  *Distribute-aware layers*

  Layers are generally called in a replica context, except when defining a
  functional model.
`tf.distribute.in_cross_replica_context` will let you determine which case you are in. If in a replica context, the `tf.distribute.get_replica_context` function will return a `tf.distribute.ReplicaContext` object. The `ReplicaContext` object has an `all_reduce` method for aggregating across all replicas. Alternatively, you can update variables following steps 2-4 above. Note: For new `tf.distribute.Strategy` implementations, please put all logic in a subclass of `tf.distribute.StrategyExtended`. The only code needed for the `tf.distribute.Strategy` subclass is for instantiating your subclass of `tf.distribute.StrategyExtended` in the `__init__` method. """ def __init__(self, container_strategy): self._container_strategy_weakref = weakref.ref(container_strategy) self._default_device = None # This property is used to determine if we should set drop_remainder=True # when creating Datasets from numpy array inputs. self._require_static_shapes = False def _container_strategy(self): """Get the containing `tf.distribute.Strategy`. This should not generally be needed except when creating a new `ReplicaContext` and to validate that the caller is in the correct `scope()`. Returns: The `tf.distribute.Strategy` such that `strategy.extended` is `self`. """ container_strategy = self._container_strategy_weakref() assert container_strategy is not None return container_strategy def _scope(self, strategy): """Implementation of tf.distribute.Strategy.scope().""" def creator_with_resource_vars(*args, **kwargs): _require_strategy_scope_extended(self) kwargs["use_resource"] = True kwargs["distribute_strategy"] = strategy return self._create_variable(*args, **kwargs) def distributed_getter(getter, *args, **kwargs): if not self._allow_variable_partition(): if kwargs.pop("partitioner", None) is not None: tf_logging.log_first_n( tf_logging.WARN, "Partitioned variables are disabled when using " "current tf.distribute.Strategy.", 1) return getter(*args, **kwargs) return _CurrentDistributionContext( strategy, variable_scope.variable_creator_scope(creator_with_resource_vars), variable_scope.variable_scope( variable_scope.get_variable_scope(), custom_getter=distributed_getter), self._default_device) def _allow_variable_partition(self): return False def _create_variable(self, next_creator, *args, **kwargs): # Note: should support "colocate_with" argument. raise NotImplementedError("must be implemented in descendants") def variable_created_in_scope(self, v): """Tests whether `v` was created while this strategy scope was active. Variables created inside the strategy scope are "owned" by it: >>> with strategy.scope(): ... v = tf.Variable(1.) >>> strategy.variable_created_in_scope(v) True Variables created outside the strategy are not owned by it: >>> v = tf.Variable(1.) >>> strategy.variable_created_in_scope(v) False Args: v: A `tf.Variable` instance. Returns: True if `v` was created inside the scope, False if not. """ return v._distribute_strategy == self._container_strategy_weakref() # pylint: disable=protected-access def colocate_vars_with(self, colocate_with_variable): """Scope that controls which devices variables will be created on. No operations should be added to the graph inside this scope, it should only be used when creating variables (some implementations work by changing variable creation, others work by using a tf.compat.v1.colocate_with() scope). This may only be used inside `self.scope()`. Example usage: ``` with strategy.scope(): var1 = tf.Variable(...) 
with strategy.extended.colocate_vars_with(var1): # var2 and var3 will be created on the same device(s) as var1 var2 = tf.Variable(...) var3 = tf.Variable(...) def fn(v1, v2, v3): # operates on v1 from var1, v2 from var2, and v3 from var3 # `fn` runs on every device `var1` is on, `var2` and `var3` will be there # too. strategy.extended.update(var1, fn, args=(var2, var3)) ``` Args: colocate_with_variable: A variable created in this strategy's `scope()`. Variables created while in the returned context manager will be on the same set of devices as `colocate_with_variable`. Returns: A context manager. """ def create_colocated_variable(next_creator, *args, **kwargs): _require_strategy_scope_extended(self) kwargs["use_resource"] = True kwargs["colocate_with"] = colocate_with_variable return next_creator(*args, **kwargs) _require_strategy_scope_extended(self) self._validate_colocate_with_variable(colocate_with_variable) return variable_scope.variable_creator_scope(create_colocated_variable) def _validate_colocate_with_variable(self, colocate_with_variable): """Validate `colocate_with_variable` argument to `colocate_vars_with`.""" pass def _make_dataset_iterator(self, dataset): raise NotImplementedError("must be implemented in descendants") def _make_input_fn_iterator(self, input_fn, replication_mode): raise NotImplementedError("must be implemented in descendants") def _experimental_distribute_dataset(self, dataset): raise NotImplementedError("must be implemented in descendants") def _experimental_distribute_datasets_from_function(self, dataset_fn): raise NotImplementedError("must be implemented in descendants") def _reduce(self, reduce_op, value): # Default implementation until we have an implementation for each strategy. return self._local_results( self._reduce_to(reduce_op, value, device_util.current() or "/device:CPU:0"))[0] def reduce_to(self, reduce_op, value, destinations): """Combine (via e.g. sum or mean) values across replicas. Args: reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum. value: A per-replica value with one value per replica. destinations: A mirrored variable, a per-replica tensor, or a device string. The return value will be copied to all destination devices (or all the devices where the `destinations` value resides). To perform an all-reduction, pass `value` to `destinations`. Returns: A value mirrored to `destinations`. """ # TODO(josh11b): More docstring _require_cross_replica_or_default_context_extended(self) assert not isinstance(destinations, (list, tuple)) assert not isinstance(reduce_op, variable_scope.VariableAggregation) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) assert (reduce_op == reduce_util.ReduceOp.SUM or reduce_op == reduce_util.ReduceOp.MEAN) return self._reduce_to(reduce_op, value, destinations) def _reduce_to(self, reduce_op, value, destinations): raise NotImplementedError("must be implemented in descendants") def batch_reduce_to(self, reduce_op, value_destination_pairs): """Combine multiple `reduce_to` calls into one for faster execution. Args: reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum. value_destination_pairs: A sequence of (value, destinations) pairs. See `reduce_to()` for a description. Returns: A list of mirrored values, one per pair in `value_destination_pairs`. 
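    For example (a sketch; `grads` and `variables` are assumed to be
    per-replica gradients and the variables they apply to, gathered in
    cross-replica context):

    ```python
    reduced = strategy.extended.batch_reduce_to(
        tf.distribute.ReduceOp.SUM, list(zip(grads, variables)))
    ```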
""" # TODO(josh11b): More docstring _require_cross_replica_or_default_context_extended(self) assert not isinstance(reduce_op, variable_scope.VariableAggregation) if isinstance(reduce_op, six.string_types): reduce_op = reduce_util.ReduceOp(reduce_op.upper()) return self._batch_reduce_to(reduce_op, value_destination_pairs) def _batch_reduce_to(self, reduce_op, value_destination_pairs): return [ self.reduce_to(reduce_op, t, destinations=v) for t, v in value_destination_pairs ] def update(self, var, fn, args=(), kwargs=None, group=True): """Run `fn` to update `var` using inputs mirrored to the same devices. If `var` is mirrored across multiple devices, then this implements logic like: ``` results = {} for device, v in var: with tf.device(device): # args and kwargs will be unwrapped if they are mirrored. results[device] = fn(v, *args, **kwargs) return merged(results) ``` Otherwise this returns `fn(var, *args, **kwargs)` colocated with `var`. Neither `args` nor `kwargs` may contain per-replica values. If they contain mirrored values, they will be unwrapped before calling `fn`. Args: var: Variable, possibly mirrored to multiple devices, to operate on. fn: Function to call. Should take the variable as the first argument. args: Tuple or list. Additional positional arguments to pass to `fn()`. kwargs: Dict with keyword arguments to pass to `fn()`. group: Boolean. Defaults to True. If False, the return value will be unwrapped. Returns: By default, the merged return value of `fn` across all replicas. The merged result has dependencies to make sure that if it is evaluated at all, the side effects (updates) will happen on every replica. If instead "group=False" is specified, this function will return a nest of lists where each list has an element per replica, and the caller is responsible for ensuring all elements are executed. """ _require_cross_replica_or_default_context_extended(self) if kwargs is None: kwargs = {} with self._container_strategy().scope(): return self._update(var, fn, args, kwargs, group) def _update(self, var, fn, args, kwargs, group): raise NotImplementedError("must be implemented in descendants") def update_non_slot( self, colocate_with, fn, args=(), kwargs=None, group=True): """Runs `fn(*args, **kwargs)` on `colocate_with` devices. Args: colocate_with: The return value of `non_slot_devices()`. fn: Function to execute. args: Tuple or list. Positional arguments to pass to `fn()`. kwargs: Dict with keyword arguments to pass to `fn()`. group: Boolean. Defaults to True. If False, the return value will be unwrapped. Returns: Return value of `fn`, possibly merged across devices. """ _require_cross_replica_or_default_context_extended(self) if kwargs is None: kwargs = {} with self._container_strategy().scope(): return self._update_non_slot(colocate_with, fn, args, kwargs, group) def _update_non_slot(self, colocate_with, fn, args, kwargs, group): raise NotImplementedError("must be implemented in descendants") def _local_results(self, distributed_value): raise NotImplementedError("must be implemented in descendants") def value_container(self, value): """Returns the container that this per-replica `value` belongs to. Args: value: A value returned by `experimental_run_v2()` or a variable created in `scope()`. Returns: A container that `value` belongs to. If value does not belong to any container (including the case of container having been destroyed), returns the value itself. `value in experimental_local_results(value_container(value))` will always be true. 
""" raise NotImplementedError("must be implemented in descendants") def _group(self, value, name=None): """Implementation of `group`.""" value = nest.flatten(self._local_results(value)) if len(value) != 1 or name is not None: return control_flow_ops.group(value, name=name) # Special handling for the common case of one op. v, = value if hasattr(v, "op"): v = v.op return v @property def experimental_require_static_shapes(self): return self._require_static_shapes @property def _num_replicas_in_sync(self): """Returns number of replicas over which gradients are aggregated.""" raise NotImplementedError("must be implemented in descendants") @property def worker_devices(self): """Returns the tuple of all devices used to for compute replica execution. """ # TODO(josh11b): More docstring raise NotImplementedError("must be implemented in descendants") @property def parameter_devices(self): """Returns the tuple of all devices used to place variables.""" # TODO(josh11b): More docstring raise NotImplementedError("must be implemented in descendants") def non_slot_devices(self, var_list): """Device(s) for non-slot variables. Create variables on these devices in a `with colocate_vars_with(non_slot_devices(...)):` block. Update those using `update_non_slot()`. Args: var_list: The list of variables being optimized, needed with the default `tf.distribute.Strategy`. """ raise NotImplementedError("must be implemented in descendants") def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): """Configures the strategy class.""" del session_config, cluster_spec, task_type, task_id def _update_config_proto(self, config_proto): return copy.deepcopy(config_proto) @tf_export(v1=["distribute.StrategyExtended"]) # pylint: disable=missing-docstring class StrategyExtendedV1(StrategyExtendedV2): __doc__ = StrategyExtendedV2.__doc__ def experimental_make_numpy_dataset(self, numpy_input, session=None): """Makes a dataset for input provided via a numpy array. This avoids adding `numpy_input` as a large constant in the graph, and copies the data to the machine or machines that will be processing the input. Args: numpy_input: A nest of NumPy input arrays that will be distributed evenly across all replicas. Note that lists of Numpy arrays are stacked, as that is normal `tf.data.Dataset` behavior. session: (TensorFlow v1.x graph execution only) A session used for initialization. Returns: A `tf.data.Dataset` representing `numpy_input`. """ _require_cross_replica_or_default_context_extended(self) return self._experimental_make_numpy_dataset(numpy_input, session=session) def _experimental_make_numpy_dataset(self, numpy_input, session): raise NotImplementedError("must be implemented in descendants") def broadcast_to(self, tensor, destinations): """Mirror a tensor on one device to all worker devices. Args: tensor: A Tensor value to broadcast. destinations: A mirrored variable or device string specifying the destination devices to copy `tensor` to. Returns: A value mirrored to `destinations` devices. 
""" assert destinations is not None # from old strategy.broadcast() # TODO(josh11b): More docstring _require_cross_replica_or_default_context_extended(self) assert not isinstance(destinations, (list, tuple)) return self._broadcast_to(tensor, destinations) def _broadcast_to(self, tensor, destinations): raise NotImplementedError("must be implemented in descendants") def experimental_run_steps_on_iterator(self, fn, iterator, iterations=1, initial_loop_values=None): """Run `fn` with input from `iterator` for `iterations` times. This method can be used to run a step function for training a number of times using input from a dataset. Args: fn: function to run using this distribution strategy. The function must have the following signature: `def fn(context, inputs)`. `context` is an instance of `MultiStepContext` that will be passed when `fn` is run. `context` can be used to specify the outputs to be returned from `fn` by calling `context.set_last_step_output`. It can also be used to capture non tensor outputs by `context.set_non_tensor_output`. See `MultiStepContext` documentation for more information. `inputs` will have same type/structure as `iterator.get_next()`. Typically, `fn` will use `call_for_each_replica` method of the strategy to distribute the computation over multiple replicas. iterator: Iterator of a dataset that represents the input for `fn`. The caller is responsible for initializing the iterator as needed. iterations: (Optional) Number of iterations that `fn` should be run. Defaults to 1. initial_loop_values: (Optional) Initial values to be passed into the loop that runs `fn`. Defaults to `None`. # TODO(priyag): Remove initial_loop_values argument when we have a mechanism to infer the outputs of `fn`. Returns: Returns the `MultiStepContext` object which has the following properties, among other things: - run_op: An op that runs `fn` `iterations` times. - last_step_outputs: A dictionary containing tensors set using `context.set_last_step_output`. Evaluating this returns the value of the tensors after the last iteration. - non_tensor_outputs: A dictionatry containing anything that was set by `fn` by calling `context.set_non_tensor_output`. """ _require_cross_replica_or_default_context_extended(self) with self._container_strategy().scope(): return self._experimental_run_steps_on_iterator(fn, iterator, iterations, initial_loop_values) def _experimental_run_steps_on_iterator(self, fn, iterator, iterations, initial_loop_values): raise NotImplementedError("must be implemented in descendants") def call_for_each_replica(self, fn, args=(), kwargs=None): """Run `fn` once per replica. `fn` may call `tf.get_replica_context()` to access methods such as `replica_id_in_sync_group` and `merge_call()`. `merge_call()` is used to communicate between the replicas and re-enter the cross-replica context. All replicas pause their execution having encountered a `merge_call()` call. After that the `merge_fn`-function is executed. Its results are then unwrapped and given back to each replica call. After that execution resumes until `fn` is complete or encounters another `merge_call()`. Example: ```python # Called once in "cross-replica" context. def merge_fn(distribution, three_plus_replica_id): # sum the values across replicas return sum(distribution.experimental_local_results(three_plus_replica_id)) # Called once per replica in `distribution`, in a "replica" context. 
def fn(three): replica_ctx = tf.get_replica_context() v = three + replica_ctx.replica_id_in_sync_group # Computes the sum of the `v` values across all replicas. s = replica_ctx.merge_call(merge_fn, args=(v,)) return s + v with distribution.scope(): # in "cross-replica" context ... merged_results = distribution.experimental_run_v2(fn, args=[3]) # merged_results has the values from every replica execution of `fn`. # This statement prints a list: print(distribution.experimental_local_results(merged_results)) ``` Args: fn: function to run (will be run once per replica). args: Tuple or list with positional arguments for `fn`. kwargs: Dict with keyword arguments for `fn`. Returns: Merged return value of `fn` across all replicas. """ _require_cross_replica_or_default_context_extended(self) if kwargs is None: kwargs = {} with self._container_strategy().scope(): return self._call_for_each_replica(fn, args, kwargs) def _call_for_each_replica(self, fn, args, kwargs): raise NotImplementedError("must be implemented in descendants") def read_var(self, v): """Reads the value of a variable. Returns the aggregate value of a replica-local variable, or the (read-only) value of any other variable. Args: v: A variable allocated within the scope of this `tf.distribute.Strategy`. Returns: A tensor representing the value of `v`, aggregated across replicas if necessary. """ raise NotImplementedError("must be implemented in descendants") @property def experimental_between_graph(self): """Whether the strategy uses between-graph replication or not. This is expected to return a constant value that will not be changed throughout its life cycle. """ raise NotImplementedError("must be implemented in descendants") @property def experimental_should_init(self): """Whether initialization is needed.""" raise NotImplementedError("must be implemented in descendants") @property def should_checkpoint(self): """Whether checkpointing is needed.""" raise NotImplementedError("must be implemented in descendants") @property def should_save_summary(self): """Whether saving summaries is needed.""" raise NotImplementedError("must be implemented in descendants") # A note about the difference between the context managers # `ReplicaContext` (defined here) and `_CurrentDistributionContext` # (defined above) used by `tf.distribute.Strategy.scope()`: # # * a ReplicaContext is only present during a `experimental_run_v2()` # call (except during a `merge_run` call) and in such a scope it # will be returned by calls to `get_replica_context()`. Implementers of new # Strategy descendants will frequently also need to # define a descendant of ReplicaContext, and are responsible for # entering and exiting this context. # # * Strategy.scope() sets up a variable_creator scope that # changes variable creation calls (e.g. to make mirrored # variables). This is intended as an outer scope that users enter once # around their model creation and graph definition. There is no # anticipated need to define descendants of _CurrentDistributionContext. # It sets the current Strategy for purposes of # `get_strategy()` and `has_strategy()` # and switches the thread mode to a "cross-replica context". @tf_export("distribute.ReplicaContext") class ReplicaContext(object): """`tf.distribute.Strategy` API when in a replica context. To be used inside your replicated step function, such as in a `tf.distribute.Strategy.experimental_run_v2` call. 
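  Example (a minimal sketch, assuming an existing `tf.distribute.Strategy`
  instance named `strategy`):

  ```
  def step_fn():
    ctx = tf.distribute.get_replica_context()
    # Each replica contributes `replica_id + 1`; `all_reduce` returns the
    # sum of those contributions on every replica.
    value = tf.cast(ctx.replica_id_in_sync_group + 1, tf.float32)
    return ctx.all_reduce(tf.distribute.ReduceOp.SUM, value)

  per_replica_result = strategy.experimental_run_v2(step_fn)
  ```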
""" def __init__(self, strategy, replica_id_in_sync_group): self._strategy = strategy self._thread_context = distribution_strategy_context._InReplicaThreadMode( # pylint: disable=protected-access self) self._replica_id_in_sync_group = replica_id_in_sync_group self._summary_recording_distribution_strategy = None def __enter__(self): _push_per_thread_mode(self._thread_context) ctx = eager_context.context() def replica_id_is_zero(): return math_ops.equal(self._replica_id_in_sync_group, constant_op.constant(0)) self._summary_recording_distribution_strategy = ( ctx.summary_recording_distribution_strategy) ctx.summary_recording_distribution_strategy = replica_id_is_zero def __exit__(self, exception_type, exception_value, traceback): ctx = eager_context.context() ctx.summary_recording_distribution_strategy = ( self._summary_recording_distribution_strategy) _pop_per_thread_mode() def merge_call(self, merge_fn, args=(), kwargs=None): """Merge args across replicas and run `merge_fn` in a cross-replica context. This allows communication and coordination when there are multiple calls to a model function triggered by a call to `strategy.experimental_run_v2(model_fn, ...)`. See `tf.distribute.Strategy.experimental_run_v2` for an explanation. If not inside a distributed scope, this is equivalent to: ``` strategy = tf.distribute.get_strategy() with cross-replica-context(strategy): return merge_fn(strategy, *args, **kwargs) ``` Args: merge_fn: function that joins arguments from threads that are given as PerReplica. It accepts `tf.distribute.Strategy` object as the first argument. args: List or tuple with positional per-thread arguments for `merge_fn`. kwargs: Dict with keyword per-thread arguments for `merge_fn`. Returns: The return value of `merge_fn`, except for `PerReplica` values which are unpacked. """ require_replica_context(self) if kwargs is None: kwargs = {} return self._merge_call(merge_fn, args, kwargs) def _merge_call(self, merge_fn, args, kwargs): """Default implementation for single replica.""" _push_per_thread_mode( # thread-local, so not needed with multiple threads distribution_strategy_context._CrossReplicaThreadMode(self._strategy)) # pylint: disable=protected-access try: return merge_fn(self._strategy, *args, **kwargs) finally: _pop_per_thread_mode() @property def num_replicas_in_sync(self): """Returns number of replicas over which gradients are aggregated.""" return self._strategy.num_replicas_in_sync @property def replica_id_in_sync_group(self): """Which replica is being defined, from 0 to `num_replicas_in_sync - 1`.""" require_replica_context(self) return self._replica_id_in_sync_group @property def strategy(self): """The current `tf.distribute.Strategy` object.""" return self._strategy @property def devices(self): """The devices this replica is to be executed on, as a tuple of strings.""" require_replica_context(self) return (device_util.current(),) def all_reduce(self, reduce_op, value): """All-reduces the given `Tensor` nest across replicas. If `all_reduce` is called in any replica, it must be called in all replicas. The nested structure and `Tensor` shapes must be identical in all replicas. IMPORTANT: The ordering of communications must be identical in all replicas. 
    Example with two replicas:
      Replica 0 `value`: {'a': 1, 'b': [40, 1]}
      Replica 1 `value`: {'a': 3, 'b': [ 2, 98]}

      If `reduce_op` == `SUM`:
        Result (on all replicas): {'a': 4, 'b': [42, 99]}

      If `reduce_op` == `MEAN`:
        Result (on all replicas): {'a': 2, 'b': [21, 49.5]}

    Args:
      reduce_op: Reduction type, an instance of `tf.distribute.ReduceOp` enum.
      value: The nested structure of `Tensor`s to all-reduce. The structure
        must be compatible with `tf.nest`.

    Returns:
      A `Tensor` nest with the reduced `value`s from each replica.
    """

    def batch_all_reduce(strategy, *value_flat):
      return strategy.extended.batch_reduce_to(
          reduce_op, [(v, _batch_reduce_destination(v)) for v in value_flat])

    if reduce_op in [reduce_util.ReduceOp.SUM, reduce_util.ReduceOp.MEAN]:
      # TODO(cjfj): Work out why `batch_reduce` doesn't return the correct grad.
      @custom_gradient.custom_gradient
      def grad_wrapper(*xs):
        ys = self.merge_call(batch_all_reduce, args=xs)
        # The gradient of an all-sum is itself an all-sum (all-mean, likewise).
        return ys, lambda *dy_s: self.all_reduce(reduce_op, dy_s)

      return nest.pack_sequence_as(value, grad_wrapper(*nest.flatten(value)))
    else:
      # TODO(cjfj): Implement gradients for other reductions.
      reduced = nest.pack_sequence_as(
          value, self.merge_call(batch_all_reduce, args=nest.flatten(value)))
      return nest.map_structure(array_ops.prevent_gradient, reduced)


# TODO(josh11b): Implement `start_all_reduce(method, t)` for efficient
# all-reduce. It would return a function returning the result of reducing `t`
# across all replicas. The caller would wait to call this function until they
# needed the reduce result, allowing an efficient implementation:
# * With eager execution, the reduction could be performed asynchronously
#   in the background, not blocking until the result was needed.
# * When constructing a graph, it could batch up all reduction requests up
#   to the point that the first result is needed. Most likely this can be
#   implemented in terms of `merge_call()` and `batch_reduce_to()`.


def _batch_reduce_destination(x):
  """Returns the destinations for batch all-reduce."""
  if isinstance(x, ops.Tensor):  # One device strategies.
    return x.device
  else:
    return x


# ------------------------------------------------------------------------------


_creating_default_strategy_singleton = False


class _DefaultDistributionStrategy(StrategyV1):
  """Default `tf.distribute.Strategy` if none is explicitly selected."""

  def __init__(self):
    if not _creating_default_strategy_singleton:
      raise RuntimeError("Should only create a single instance of "
                         "_DefaultDistributionStrategy")
    super(_DefaultDistributionStrategy, self).__init__(
        _DefaultDistributionExtended(self))

  def __deepcopy__(self, memo):
    del memo
    raise RuntimeError("Should only create a single instance of "
                       "_DefaultDistributionStrategy")


class _DefaultDistributionContext(object):
  """Context manager setting the default `tf.distribute.Strategy`."""

  def __init__(self, strategy):

    def creator(next_creator, *args, **kwargs):
      _require_strategy_scope_strategy(strategy)
      return next_creator(*args, **kwargs)

    self._var_creator_scope = variable_scope.variable_creator_scope(creator)
    self._strategy = strategy
    self._nested_count = 0

  def __enter__(self):
    # Allow this scope to be entered if this strategy is already in scope.
if distribution_strategy_context.has_strategy(): raise RuntimeError("Must not nest tf.distribute.Strategy scopes.") if self._nested_count == 0: self._var_creator_scope.__enter__() self._nested_count += 1 return self._strategy def __exit__(self, exception_type, exception_value, traceback): self._nested_count -= 1 if self._nested_count == 0: try: self._var_creator_scope.__exit__( exception_type, exception_value, traceback) except RuntimeError as e: six.raise_from( RuntimeError("Variable creator scope nesting error: move call to " "tf.distribute.set_strategy() out of `with` scope."), e) class _DefaultDistributionExtended(StrategyExtendedV1): """Implementation of _DefaultDistributionStrategy.""" def __init__(self, container_strategy): super(_DefaultDistributionExtended, self).__init__(container_strategy) self._retrace_functions_for_each_device = False def _scope(self, strategy): """Context manager setting a variable creator and `self` as current.""" return _DefaultDistributionContext(strategy) def colocate_vars_with(self, colocate_with_variable): """Does not require `self.scope`.""" _require_strategy_scope_extended(self) return ops.colocate_with(colocate_with_variable) def variable_created_in_scope(self, v): return v._distribute_strategy is None # pylint: disable=protected-access def _experimental_distribute_dataset(self, dataset): return dataset def _make_dataset_iterator(self, dataset): return _DefaultDistributionExtended.DefaultInputIterator(dataset) def _make_input_fn_iterator(self, input_fn, replication_mode=InputReplicationMode.PER_WORKER): dataset = input_fn(InputContext()) return _DefaultDistributionExtended.DefaultInputIterator(dataset) def _experimental_make_numpy_dataset(self, numpy_input, session): numpy_flat = nest.flatten(numpy_input) vars_flat = tuple( variable_scope.variable(array_ops.zeros(i.shape, i.dtype), trainable=False, use_resource=True) for i in numpy_flat ) for v, i in zip(vars_flat, numpy_flat): numpy_dataset.init_var_from_numpy(v, i, session) vars_nested = nest.pack_sequence_as(numpy_input, vars_flat) return dataset_ops.Dataset.from_tensor_slices(vars_nested) def _broadcast_to(self, tensor, destinations): if destinations is None: return tensor else: raise NotImplementedError("TODO") def _call_for_each_replica(self, fn, args, kwargs): with ReplicaContext( self._container_strategy(), replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)): return fn(*args, **kwargs) def _reduce_to(self, reduce_op, value, destinations): # TODO(josh11b): Use destinations? del reduce_op, destinations return value def _update(self, var, fn, args, kwargs, group): # The implementations of _update() and _update_non_slot() are identical # except _update() passes `var` as the first argument to `fn()`. return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group) def _update_non_slot(self, colocate_with, fn, args, kwargs, should_group): # TODO(josh11b): Figure out what we should be passing to UpdateContext() # once that value is used for something. 
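    # Default-strategy sketch: there is only one replica, so there is nothing
    # to merge; `fn` simply runs once, colocated with `colocate_with`, and the
    # result is optionally unwrapped below.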
with UpdateContext(colocate_with): result = fn(*args, **kwargs) if should_group: return result else: return nest.map_structure(self._local_results, result) def read_var(self, replica_local_var): return array_ops.identity(replica_local_var) def _local_results(self, distributed_value): return (distributed_value,) def value_container(self, value): return value @property def _num_replicas_in_sync(self): return 1 @property def worker_devices(self): raise RuntimeError("worker_devices() method unsupported by default " "tf.distribute.Strategy.") @property def parameter_devices(self): raise RuntimeError("parameter_devices() method unsupported by default " "tf.distribute.Strategy.") def non_slot_devices(self, var_list): return min(var_list, key=lambda x: x.name) # TODO(priyag): This should inherit from `InputIterator`, once dependency # issues have been resolved. class DefaultInputIterator(object): """Default implementation of `InputIterator` for default strategy.""" def __init__(self, dataset): self._dataset = dataset if eager_context.executing_eagerly(): self._iterator = dataset.make_one_shot_iterator() else: self._iterator = dataset.make_initializable_iterator() def get_next(self): return self._iterator.get_next() def initialize(self): if eager_context.executing_eagerly(): self._iterator = self._dataset.make_one_shot_iterator() return [] else: return [self._iterator.initializer] # TODO(priyag): Delete this once all strategies use global batch size. @property def _global_batch_size(self): """Global and per-replica batching are equivalent for this strategy.""" return True # ------------------------------------------------------------------------------ # We haven't yet implemented deserialization for DistributedVariables. # So here we catch any attempts to deserialize variables # when using distribution strategies. # pylint: disable=protected-access _original_from_proto = resource_variable_ops._from_proto_fn def _from_proto_fn(v, import_scope=None): if distribution_strategy_context.has_strategy(): raise NotImplementedError( "Deserialization of variables is not yet supported when using a " "tf.distribute.Strategy.") else: return _original_from_proto(v, import_scope=import_scope) resource_variable_ops._from_proto_fn = _from_proto_fn # pylint: enable=protected-access #------------------------------------------------------------------------------- # Shorthand for some methods from distribution_strategy_context. _push_per_thread_mode = distribution_strategy_context._push_per_thread_mode # pylint: disable=protected-access _get_per_thread_mode = distribution_strategy_context._get_per_thread_mode # pylint: disable=protected-access _pop_per_thread_mode = distribution_strategy_context._pop_per_thread_mode # pylint: disable=protected-access _get_default_replica_mode = ( distribution_strategy_context._get_default_replica_mode) # pylint: disable=protected-access
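# Example (an illustrative, non-executable sketch of how the pieces above fit
# together in a custom training loop; `make_model`, `compute_loss`,
# `optimizer`, and `inputs` are hypothetical names):
#
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():            # Enters _CurrentDistributionContext.
#     model = make_model()            # Variables are created mirrored.
#
#   def step_fn(inputs):              # Runs in a "replica" context.
#     with tf.GradientTape() as tape:
#       loss = compute_loss(model, inputs)
#     grads = tape.gradient(loss, model.trainable_variables)
#     # The optimizer aggregates gradients across replicas internally via
#     # `merge_call()` and `StrategyExtended.reduce_to()`.
#     optimizer.apply_gradients(zip(grads, model.trainable_variables))
#
#   strategy.experimental_run_v2(step_fn, args=(inputs,))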
tensorflow-master
tensorflow/python/distribute/distribute_lib.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the distributed values library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import itertools import os from absl.testing import parameterized from tensorflow.core.protobuf import config_pb2 from tensorflow.python.distribute import combinations from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.saved_model.model_utils import mode_keys from tensorflow.python.training import saver as saver_lib from tensorflow.python.training.tracking import util as trackable_utils from tensorflow.python.util import nest class DistributedValuesTest(test.TestCase): def testGetEager(self): with ops.device("/device:CPU:0"): one = constant_op.constant(1) two = constant_op.constant(2) device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedValues(device_map, (one, two)) self.assertEqual(two, v.get("/device:GPU:0")) self.assertEqual(one, v.get()) with self.assertRaises(ValueError): self.assertIsNone(v.get("/device:GPU:2")) def testGetGraph(self): with context.graph_mode(), \ ops.Graph().as_default(), \ ops.device("/device:CPU:0"): one = constant_op.constant(1) two = constant_op.constant(2) device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedValues(device_map, (one, two)) self.assertEqual(two, v.get("/device:GPU:0")) self.assertEqual(one, v.get()) with self.assertRaises(ValueError): self.assertIsNone(v.get("/device:GPU:2")) def testCanonicalization(self): canonical_cpu = ("/job:localhost/replica:0/task:0/device:CPU:0",) v = values.DistributedValues(values.SingleDeviceMap(""), (42,)) self.assertEqual(canonical_cpu, v.devices) v = values.DistributedValues(values.SingleDeviceMap("/device:CPU:0"), (42,)) self.assertEqual(canonical_cpu, v.devices) v = values.DistributedValues(values.SingleDeviceMap("/cpu:0"), (42,)) self.assertEqual(canonical_cpu, v.devices) v = 
values.DistributedValues(values.SingleDeviceMap("/CPU:0"), (42,)) self.assertEqual(canonical_cpu, v.devices) def testIsTensorLike(self): with context.graph_mode(), \ ops.Graph().as_default(), \ ops.device("/device:CPU:0"): one = constant_op.constant(1) two = constant_op.constant(2) device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedValues(device_map, (one, two)) self.assertEqual(two, v.get("/device:GPU:0")) self.assertEqual(one, v.get()) self.assertTrue(v.is_tensor_like) self.assertTrue(tensor_util.is_tensor(v)) def testIsTensorLikeWithAConstant(self): with context.graph_mode(), \ ops.Graph().as_default(), \ ops.device("/device:CPU:0"): one = constant_op.constant(1) two = 2.0 device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedValues(device_map, (one, two)) self.assertEqual(two, v.get("/device:GPU:0")) self.assertEqual(one, v.get()) self.assertFalse(v.is_tensor_like) self.assertFalse(tensor_util.is_tensor(v)) class DistributedDelegateTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testGetAttr(self): with ops.device("/device:CPU:0"): class Foo(object): def __init__(self, x): self.x = x device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedDelegate(device_map, (Foo(7), Foo(8))) self.assertEqual(7, v.x) with self.assertRaises(AttributeError): _ = v.y @test_util.run_in_graph_and_eager_modes def testOperatorOverride(self): with ops.device("/device:CPU:0"): device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0")) v = values.DistributedDelegate(device_map, (7, 8)) # v should act like int(7). self.assertEqual(8, v + 1) self.assertEqual(10, 3 + v) self.assertEqual(14, v + v) self.assertEqual(5, v - 2) self.assertEqual(6, 13 - v) self.assertEqual(0, v - v) self.assertEqual(14, v * 2) self.assertEqual(21, 3 * v) self.assertEqual(49, v * v) self.assertEqual(3.5, v / 2) self.assertEqual(1.5, 10.5 / v) self.assertEqual(3, v // 2) self.assertEqual(2, 15 // v) self.assertEqual(1, v % 2) self.assertEqual(2, 16 % v) self.assertTrue(v < 12) self.assertTrue(v <= 12) self.assertFalse(v > 12) self.assertFalse(v >= 12) self.assertFalse(12 < v) self.assertFalse(12 <= v) self.assertTrue(12 > v) self.assertTrue(12 >= v) self.assertEqual(3, v & 3) self.assertEqual(3, 11 & v) self.assertEqual(15, v | 8) self.assertEqual(23, 16 | v) self.assertEqual(4, v ^ 3) self.assertEqual(12, 11 ^ v) self.assertEqual(343, pow(v, 3)) self.assertEqual(3, pow(v, 3, 10)) self.assertEqual(128, pow(2, v)) self.assertEqual(-7, -v) self.assertEqual(~7, ~v) self.assertEqual(7, abs(v)) with self.assertRaises(TypeError): _ = v[2] def _device_str(d): return "/device:GPU:" + str(d) def _nested_value(d): return ("a" + d, ["b" + d, {"c": "d" + d, "e": "f" + d}, "g" + d], "h" + d) def _make_mirrored(): v = [] devices = ["/device:GPU:0", "/device:CPU:0"] for d, n, init in zip(devices, ["v", "v/replica"], [1., 2.]): with ops.device(d): v.append(variable_scope.get_variable( name=n, initializer=init, use_resource=True)) device_map = values.ReplicaDeviceMap(devices) mirrored = values.MirroredVariable(None, device_map, v, variable_scope.VariableAggregation.SUM) return v, device_map, mirrored class RegroupAndSelectDeviceTest(test.TestCase): def _is_per_replica(self, result, expected, klass=values.PerReplica): self.assertIsInstance(result, klass) # We canonicalize the devices to match the device strings returned # by PerReplica, which also does device string canonicalization. 
devices = [device_util.canonicalize(_device_str(i)) for i in range(len(expected))] self.assertEqual(set(devices), set(result.devices)) for i, d in enumerate(devices): self.assertEqual(expected[i], result.get(d)) self.assertEqual(expected[i], result.get(_device_str(i))) def testNested(self): device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1))) result = values.regroup(device_map, (_nested_value("1"), _nested_value("2"))) self.assertIsInstance(result, tuple) self.assertEqual(3, len(result)) self._is_per_replica(result[0], ["a1", "a2"]) self._is_per_replica(result[2], ["h1", "h2"]) self.assertIsInstance(result[1], list) self.assertEqual(3, len(result[1])) self._is_per_replica(result[1][0], ["b1", "b2"]) self._is_per_replica(result[1][2], ["g1", "g2"]) self.assertIsInstance(result[1][1], dict) self.assertEqual(set(["c", "e"]), set(result[1][1].keys())) self._is_per_replica(result[1][1]["c"], ["d1", "d2"]) self._is_per_replica(result[1][1]["e"], ["f1", "f2"]) # Also test that we can undo the merge using select_replica() self.assertEqual(_nested_value("1"), values.select_replica(0, result)) self.assertEqual(_nested_value("2"), values.select_replica(1, result)) # select_device_mirrored() should fail due to non-mirrored values with self.assertRaises(TypeError): values.select_device_mirrored(_device_str(0), result) with self.assertRaises(TypeError): values.select_device_mirrored(_device_str(1), result) def testWrapClass(self): # Normally a mirrored value would be the same across devices, but # for a test it is convenient to be able to tell the values apart. device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1))) result = values.regroup(device_map, (_nested_value("1"), _nested_value("2")), values.Mirrored) self.assertIsInstance(result, tuple) self.assertEqual(3, len(result)) self._is_per_replica(result[0], ["a1", "a2"], values.Mirrored) self._is_per_replica(result[2], ["h1", "h2"], values.Mirrored) self.assertIsInstance(result[1], list) self.assertEqual(3, len(result[1])) self._is_per_replica(result[1][0], ["b1", "b2"], values.Mirrored) self._is_per_replica(result[1][2], ["g1", "g2"], values.Mirrored) self.assertIsInstance(result[1][1], dict) self.assertEqual(set(["c", "e"]), set(result[1][1].keys())) self._is_per_replica(result[1][1]["c"], ["d1", "d2"], values.Mirrored) self._is_per_replica(result[1][1]["e"], ["f1", "f2"], values.Mirrored) # Also test that we can undo the merge using select_replica() self.assertEqual(_nested_value("1"), values.select_replica(0, result)) self.assertEqual(_nested_value("2"), values.select_replica(1, result)) # Values are marked as mirrored, so select_device_mirrored() is allowed. 
self.assertEqual(_nested_value("1"), values.select_device_mirrored(_device_str(0), result)) self.assertEqual(_nested_value("2"), values.select_device_mirrored(_device_str(1), result)) def testWrapAListOfTwoTuples(self): device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1))) result = values.regroup(device_map, [("1", "2"), ("3", "4")]) self.assertIsInstance(result, tuple) self.assertEqual(2, len(result)) self._is_per_replica(result[0], ("1", "3"), values.PerReplica) self._is_per_replica(result[1], ("2", "4"), values.PerReplica) def testMirroredContainer(self): if context.num_gpus() < 1 and context.executing_eagerly(): self.skipTest("A GPU is not available for this test in eager mode.") v, device_map, mirrored = _make_mirrored() result = values.regroup(device_map, v) self.assertIs(mirrored, result) def testSameId(self): foo = object() device_map = values.ReplicaDeviceMap((_device_str(0), _device_str(1))) result = values.regroup(device_map, (("a", foo), ("b", foo))) self.assertIsInstance(result, tuple) self.assertEqual(2, len(result)) self._is_per_replica(result[0], ["a", "b"]) self.assertIs(foo, result[1]) # Test select_replica(), should undo the merge done by regroup(). result_0 = values.select_replica(0, result) self.assertIsInstance(result_0, tuple) self.assertEqual(2, len(result_0)) self.assertEqual("a", result_0[0]) self.assertIs(foo, result_0[1]) result_1 = values.select_replica(1, result) self.assertIsInstance(result_1, tuple) self.assertEqual(2, len(result_1)) self.assertEqual("b", result_1[0]) self.assertIs(foo, result_1[1]) def testOneDevice(self): device_map = values.ReplicaDeviceMap((_device_str(0),)) result = values.regroup(device_map, (_nested_value("1"),)) # On one device regroup() and select_replica() are basically identity. self.assertEqual(_nested_value("1"), result) self.assertEqual(_nested_value("1"), values.select_replica(0, result)) # The one exception has to do with MirroredVariables. d = "/device:CPU:0" with ops.device(d): v = variable_scope.get_variable( name="v", initializer=1., use_resource=True) device_map = values.ReplicaDeviceMap((d,)) mirrored = values.MirroredVariable(None, device_map, (v,), variable_scope.VariableAggregation.SUM) result = values.regroup(device_map, (v,)) self.assertIs(mirrored, result) def testNamedTuple(self): # We include toy implementations of Scaffold and EstimatorSpec to # avoid a dependency on Estimator here. 
class Scaffold(object): pass class EstimatorSpec(collections.namedtuple( "EstimatorSpec", ["mode", "loss", "train_op", "scaffold"])): def __new__(cls, mode, loss, train_op, scaffold=None): return super(EstimatorSpec, cls).__new__( cls, mode=mode, loss=loss, train_op=train_op, scaffold=scaffold or Scaffold()) with context.graph_mode(), ops.Graph().as_default(): devices = [] created_estimator_specs = [] for device_id in range(3): spec = EstimatorSpec( mode=mode_keys.EstimatorModeKeys.TRAIN, loss=constant_op.constant(device_id / 2), train_op=array_ops.identity(constant_op.constant(device_id))) devices.append(_device_str(device_id)) created_estimator_specs.append(spec) device_map = values.ReplicaDeviceMap(devices) merged_estimator_spec = values.regroup( device_map, created_estimator_specs) self.assertIsInstance(merged_estimator_spec, EstimatorSpec) self.assertEqual(mode_keys.EstimatorModeKeys.TRAIN, merged_estimator_spec.mode) for device_id in range(3): d = _device_str(device_id) self.assertEqual(created_estimator_specs[device_id].loss, merged_estimator_spec.loss.get(d)) self.assertEqual(created_estimator_specs[device_id].train_op, merged_estimator_spec.train_op.get(d)) # Scaffold is populated by `EstimatorSpec.__new__`. self.assertEqual(created_estimator_specs[device_id].scaffold, merged_estimator_spec.scaffold.get(d)) self.assertIsInstance(created_estimator_specs[device_id].scaffold, Scaffold) # Also test that we can undo the merge using select_replica() self.assertEqual(created_estimator_specs[device_id], values.select_replica(device_id, merged_estimator_spec)) class MirroredVariableTest(test.TestCase, parameterized.TestCase): config = config_pb2.ConfigProto() config.allow_soft_placement = True @test_util.run_in_graph_and_eager_modes(config=config) def testProperties(self): if context.num_gpus() < 1 and context.executing_eagerly(): self.skipTest("A GPU is not available for this test in eager mode.") v, _, mirrored = _make_mirrored() self.assertEqual(v[0].name, mirrored.name) self.assertEqual(v[0].dtype, mirrored.dtype) self.assertEqual(v[0].shape, mirrored.shape) @test_util.run_in_graph_and_eager_modes(config=config) def testVariableOnAnotherDevice(self): v = variable_scope.get_variable( name="v", initializer=[1.], use_resource=True) device_map = values.ReplicaDeviceMap(("/job:foo/device:CPU:0",)) mirrored = values.MirroredVariable(None, device_map, (v,), variable_scope.VariableAggregation.MEAN) self.assertEqual(v.name, mirrored.name) self.assertEqual(v.dtype, mirrored.dtype) self.assertEqual(v.shape, mirrored.shape) def _assign_mirrored(self, devices, v, new): for d, var, n in zip(devices, v, new): with ops.device(d): self.evaluate(var.assign(n)) def _save_return_saver(self, sess, var): saver = saver_lib.Saver(var_list=[var]) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") return saver.save(sess, prefix), saver def _save(self, sess, var): save_path, _ = self._save_return_saver(sess, var) return save_path @test_util.run_in_graph_and_eager_modes(config=config) def testSaveAndRestoreMirroredOneGraph(self): if context.num_gpus() < 1 and context.executing_eagerly(): # Graph mode can work without GPU because the Placer "moves" the # variable to a CPU. In other words, if there is no GPU available, but # user requested to create a variable on GPU, Placer will ignore the # user request and assign the VarHandleOp to CPU. This requires # soft_placement, which is on by default. 
self.skipTest("A GPU is not available for this test in eager mode.") with self.cached_session(config=self.config) as sess: v, device_map, mirrored = _make_mirrored() devices = device_map.all_devices # Overwrite the initial values. self._assign_mirrored(devices, v, [3., 4.]) # Saves the current value of v[0], 3. save_path, saver = self._save_return_saver(sess, mirrored) # Change the values between save and restore. self._assign_mirrored(devices, v, [5., 6.]) # Restores the saved value of 3. to both variables. saver.restore(sess, save_path) self.assertEqual([3., 3.], self.evaluate([v[0], v[1]])) def _save_mirrored(self): """Save variables with mirroring, returns save_path.""" with self.session(graph=ops.Graph()) as sess: v, device_map, mirrored = _make_mirrored() devices = device_map.all_devices # Overwrite the initial values. self._assign_mirrored(devices, v, [3., 4.]) # Saves the current value of v[0], 3. save_path = self._save(sess, mirrored) # Change the values between save and restore. self._assign_mirrored(devices, v, [5., 6.]) return save_path def _save_normal(self): """Save variables without mirroring, returns save_path.""" with self.session(graph=ops.Graph()) as sess: var = variable_scope.get_variable( name="v", initializer=1., use_resource=True) # Overwrite the initial value. self.evaluate(var.assign(3.)) # Saves the current value of var, 3. save_path = self._save(sess, var) # Change the values between save and restore. self.evaluate(var.assign(5.)) return save_path def _restore_normal(self, save_path): """Restore to variables without mirroring in a fresh graph.""" with self.session(graph=ops.Graph()) as sess: var = variable_scope.get_variable( name="v", initializer=7., use_resource=True) # Overwrite the initial value. self.evaluate(var.assign(8.)) # Restores the saved value of 3. to `var`. saver = saver_lib.Saver(var_list=[var]) saver.restore(sess, save_path) self.assertEqual(3., self.evaluate(var)) def _restore_mirrored(self, save_path): """Restore to variables with mirroring in a fresh graph.""" with self.session(graph=ops.Graph()) as sess: v, device_map, mirrored = _make_mirrored() devices = device_map.all_devices # Overwrite the initial values. self._assign_mirrored(devices, v, [7., 8.]) # Restores the saved value of 3. to both variables. saver = saver_lib.Saver(var_list=[mirrored]) saver.restore(sess, save_path) self.assertEqual([3., 3.], self.evaluate([v[0], v[1]])) @test_util.run_in_graph_and_eager_modes(config=config) def testSaveMirroredRestoreMirrored(self): if context.num_gpus() < 1 and context.executing_eagerly(): # Graph mode can work without GPU because the Placer "moves" the # variable to a CPU. In other words, if there is no GPU available, but # user requested to create a variable on GPU, Placer will ignore the # user request and assign the VarHandleOp to CPU. This requires # soft_placement, which is on by default. self.skipTest("A GPU is not available for this test in eager mode.") save_path = self._save_mirrored() self._restore_mirrored(save_path) @test_util.run_in_graph_and_eager_modes(config=config) def testSaveMirroredRestoreNormal(self): if context.num_gpus() < 1 and context.executing_eagerly(): # Graph mode can work without GPU because the Placer "moves" the # variable to a CPU. In other words, if there is no GPU available, but # user requested to create a variable on GPU, Placer will ignore the # user request and assign the VarHandleOp to CPU. This requires # soft_placement, which is on by default. 
self.skipTest("A GPU is not available for this test in eager mode.") save_path = self._save_mirrored() self._restore_normal(save_path) @test_util.run_in_graph_and_eager_modes(config=config) def testSaveNormalRestoreMirrored(self): if context.num_gpus() < 1 and context.executing_eagerly(): # Graph mode can work without GPU because the Placer "moves" the # variable to a CPU. In other words, if there is no GPU available, but # user requested to create a variable on GPU, Placer will ignore the # user request and assign the VarHandleOp to CPU. This requires # soft_placement, which is on by default. self.skipTest("A GPU is not available for this test in eager mode.") save_path = self._save_normal() self._restore_mirrored(save_path) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_one_gpu, ], mode=["graph"])) def testFetchAMirroredVariable(self, distribution): with self.session(graph=ops.Graph()) as sess, distribution.scope(): with ops.device("/device:GPU:0"): v = variable_scope.get_variable( name="v", initializer=1., use_resource=True) mirrored = values.MirroredVariable( distribution, values.ReplicaDeviceMap(("/device:GPU:0",)), (v,), variable_scope.VariableAggregation.MEAN) sess.run(variables_lib.global_variables_initializer()) sess.run({"complicated": mirrored}) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy, ], mode=["graph", "eager"])) def testAssignOutOfScope_mirrored(self, distribution): with distribution.scope(): mirrored = variables_lib.Variable(1.) if not isinstance(mirrored, values.MirroredVariable): self.assertIsInstance(mirrored, values.TPUMirroredVariable) self.evaluate(mirrored.assign(3.)) self.assertEqual(self.evaluate(mirrored.read_value()), 3.) for component in mirrored.values: self.assertEqual(self.evaluate(component.read_value()), 3.) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.central_storage_strategy_with_two_gpus ], mode=["graph", "eager"])) def testAssignOutOfScope_aggregating(self, distribution): with distribution.scope(): aggregating = variables_lib.Variable(1.) self.assertIsInstance(aggregating, values.AggregatingVariable) self.evaluate(aggregating.assign(3.)) self.assertEqual(self.evaluate(aggregating.read_value()), 3.) self.assertEqual(self.evaluate(aggregating._v.read_value()), 3.) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy, strategy_combinations.central_storage_strategy_with_two_gpus, ], mode=["graph", "eager"])) def testExtendsVariable(self, distribution): with distribution.scope(): v = variables_lib.Variable(1.) self.assertIsInstance(v, variables_lib.Variable) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy, strategy_combinations.central_storage_strategy_with_two_gpus, ], mode=["graph", "eager"])) def testCheckpointing(self, distribution): with distribution.scope(): v = variables_lib.Variable(constant_op.constant([1., 2., 3., 4])) self.evaluate(v.initializer) before_save = self.evaluate(v.read_value()) # Save random weights into checkpoint. 
    checkpoint = trackable_utils.Checkpoint(v=v)
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    with self.test_session():
      save_path = checkpoint.save(prefix)

    # Assign inverted value.
    self.evaluate(v.assign(constant_op.constant([4., 3., 2., 1.])))
    after_assign = self.evaluate(v.read_value())
    self.assertNotAllClose(before_save, after_assign)

    # Restore from the checkpoint.
    with self.test_session():
      checkpoint.restore(save_path).assert_consumed().run_restore_ops()
    after_restore = self.evaluate(v)
    self.assertAllClose(before_save, after_restore)


_devices = ("/device:GPU:0", "/device:CPU:0")


def _make_replica_local(method, strategy=None):
  device_map = values.ReplicaDeviceMap(_devices)
  v = []
  for d, n, init in zip(_devices, ["v", "v/replica"], [1., 2.]):
    with ops.device(d):
      v.append(variable_scope.get_variable(
          name=n, initializer=init, use_resource=True))
  replica_local = values.SyncOnReadVariable(strategy, device_map, v, method)
  return v, replica_local


class SyncOnReadVariablePropertiesTest(test.TestCase):

  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testProperties(self):
    if context.num_gpus() < 1 and context.executing_eagerly():
      self.skipTest("A GPU is not available for this test in eager mode.")
    v, replica_local = _make_replica_local(
        variable_scope.VariableAggregation.SUM)

    self.assertEqual(v[0].name, replica_local.name)
    self.assertEqual(v[0].dtype, replica_local.dtype)
    self.assertEqual(v[0].shape, replica_local.shape)
    self.assertEqual(variable_scope.VariableAggregation.SUM,
                     replica_local.aggregation)

  @test_util.run_in_graph_and_eager_modes(config=config)
  def testVariableOnAnotherDevice(self):
    v = variable_scope.get_variable(
        name="v", initializer=[1.], use_resource=True)
    device_map = values.ReplicaDeviceMap(("/job:foo/device:CPU:0",))
    replica_local = values.SyncOnReadVariable(
        None, device_map, (v,), variable_scope.VariableAggregation.MEAN)

    self.assertEqual(v.name, replica_local.name)
    self.assertEqual(v.dtype, replica_local.dtype)
    self.assertEqual(v.shape, replica_local.shape)
    self.assertEqual(variable_scope.VariableAggregation.MEAN,
                     replica_local.aggregation)

  def testTensorConversion(self):
    with context.graph_mode():
      _, replica_local = _make_replica_local(
          variable_scope.VariableAggregation.SUM)
      converted = ops.internal_convert_to_tensor(replica_local, as_ref=False)
      self.assertIsInstance(converted, ops.Tensor)
      self.assertEqual(converted.dtype, replica_local.dtype)

      converted = ops.internal_convert_to_tensor(replica_local, as_ref=True)
      # Resource variables are also converted to tensors when as_ref is True.
self.assertIsInstance(converted, ops.Tensor) self.assertEqual(converted.dtype, replica_local.dtype) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, ], mode=["graph", "eager"])) class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase): def _assign_replica_local(self, devices, v, new): for d, var, n in zip(devices, v, new): with ops.device(d): self.evaluate(var.assign(n)) def _save_return_saver(self, sess, var): saver = saver_lib.Saver(var_list=[var]) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") return saver.save(sess, prefix), saver def _save(self, sess, var): save_path, _ = self._save_return_saver(sess, var) return save_path def testSaveAndRestoreReplicaLocalSumOneGraph(self, distribution): with self.cached_session() as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.SUM, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [3., 4.]) with distribution.scope(): # Saves the current value of v[0] + v[1], 7. save_path, saver = self._save_return_saver(sess, replica_local) # Change the values between save and restore. self._assign_replica_local(_devices, v, [5., 6.]) # Restores the saved value of 7. which gets divided equally # between the variables. saver.restore(sess, save_path) self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]])) def testSaveAndRestoreReplicaLocalMeanOneGraph(self, distribution): if context.num_gpus() < 1 and context.executing_eagerly(): self.skipTest("A GPU is not available for this test in eager mode.") with self.cached_session() as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.MEAN, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [3., 4.]) with distribution.scope(): # Saves the current value of (v[0] + v[1])/2, 3.5. save_path, saver = self._save_return_saver(sess, replica_local) # Change the values between save and restore. self._assign_replica_local(_devices, v, [5., 6.]) # Restores the saved value of 3.5 to both variables. saver.restore(sess, save_path) self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]])) def _save_replica_local_mean(self, distribution): """Save variables with mirroring, returns save_path.""" with self.session(graph=ops.Graph()) as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.MEAN, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [3., 4.]) with distribution.scope(): # Saves the current value of (v[0] + v[1])/2, 3.5 save_path = self._save(sess, replica_local) # Change the values between save and restore. self._assign_replica_local(_devices, v, [5., 6.]) return save_path def _save_replica_local_sum(self, distribution): """Save variables with mirroring, returns save_path.""" with self.session(graph=ops.Graph()) as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.SUM, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [1.5, 2.]) with distribution.scope(): # Saves the current value of v[0] + v[1], 3.5 save_path = self._save(sess, replica_local) # Change the values between save and restore. 
self._assign_replica_local(_devices, v, [5., 6.]) return save_path def _save_normal(self): """Save variables without mirroring, returns save_path.""" with self.session(graph=ops.Graph()) as sess: var = variable_scope.get_variable( name="v", initializer=1., use_resource=True) # Overwrite the initial value. self.evaluate(var.assign(3.5)) # Saves the current value of var, 3.5. save_path = self._save(sess, var) # Change the values between save and restore. self.evaluate(var.assign(5.)) return save_path def _restore_normal(self, save_path): """Restore to variables without mirroring in a fresh graph.""" with self.session(graph=ops.Graph()) as sess: var = variable_scope.get_variable( name="v", initializer=7., use_resource=True) # Overwrite the initial value. self.evaluate(var.assign(8.)) # Restores the saved value of 3.5 to `var`. saver = saver_lib.Saver(var_list=[var]) saver.restore(sess, save_path) self.assertEqual(3.5, self.evaluate(var)) def _restore_replica_local_mean(self, save_path, distribution): """Restore to variables with mirroring in a fresh graph.""" with self.session(graph=ops.Graph()) as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.MEAN, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [7., 8.]) with distribution.scope(): # Restores the saved value of 3.5 to both variables. saver = saver_lib.Saver(var_list=[replica_local]) saver.restore(sess, save_path) self.assertEqual([3.5, 3.5], self.evaluate([v[0], v[1]])) def _restore_replica_local_sum(self, save_path, distribution): """Restore to variables with mirroring in a fresh graph.""" with self.session(graph=ops.Graph()) as sess: v, replica_local = _make_replica_local( variable_scope.VariableAggregation.SUM, distribution) # Overwrite the initial values. self._assign_replica_local(_devices, v, [7., 8.]) with distribution.scope(): # Restores the saved value of 3.5 to both variables. 
saver = saver_lib.Saver(var_list=[replica_local]) saver.restore(sess, save_path) self.assertEqual([1.75, 1.75], self.evaluate([v[0], v[1]])) def testSaveReplicaLocalRestoreReplicaLocalMean(self, distribution): save_path = self._save_replica_local_mean(distribution) self._restore_replica_local_mean(save_path, distribution) def testSaveReplicaLocalRestoreReplicaLocalSum(self, distribution): save_path = self._save_replica_local_sum(distribution) self._restore_replica_local_sum(save_path, distribution) def testSaveReplicaLocalMeanRestoreNormal(self, distribution): save_path = self._save_replica_local_mean(distribution) self._restore_normal(save_path) def testSaveReplicaLocalSumRestoreNormal(self, distribution): save_path = self._save_replica_local_sum(distribution) self._restore_normal(save_path) def testSaveNormalRestoreReplicaLocalMean(self, distribution): save_path = self._save_normal() self._restore_replica_local_mean(save_path, distribution) def testSaveNormalRestoreReplicaLocalSum(self, distribution): save_path = self._save_normal() self._restore_replica_local_sum(save_path, distribution) def testAssign(self, distribution): def assign(fn, v, update_value, cross_replica): update_fn = lambda: getattr(v, fn)(update_value) if cross_replica: return update_fn() else: return distribution.experimental_local_results( distribution.experimental_run_v2(update_fn)) updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)] aggregations = [ variables_lib.VariableAggregation.NONE, variables_lib.VariableAggregation.SUM, variables_lib.VariableAggregation.MEAN, variables_lib.VariableAggregation.ONLY_FIRST_REPLICA, ] options = ( # VariableAggregation.SUM in cross-replica mode is tested below [x for x in itertools.product(updates, aggregations, [True, False]) if not(x[1] == variables_lib.VariableAggregation.SUM and x[2])]) for update, aggregation, cross_replica in options: with distribution.scope(): v = variable_scope.variable( 0., synchronization=variables_lib.VariableSynchronization.ON_READ, aggregation=aggregation) self.evaluate(variables_lib.global_variables_initializer()) fn, update_value = update self.evaluate(assign(fn, v, update_value, cross_replica)) for component in v._values: self.assertAllEqual(self.evaluate(component.read_value()), self.evaluate(array_ops.ones_like(component))) def testAssignWithAggregationSum(self, distribution): with distribution.scope(): v = variable_scope.variable( 0., synchronization=variables_lib.VariableSynchronization.ON_READ, aggregation=variables_lib.VariableAggregation.SUM) self.evaluate(variables_lib.global_variables_initializer()) self.evaluate(v.assign(1. 
* distribution.num_replicas_in_sync)) for component in v._values: self.assertAllEqual(self.evaluate(component.read_value()), self.evaluate(array_ops.ones_like(component))) def testAssignAddSubWithAggregationSum(self, distribution): with distribution.scope(): v = variable_scope.variable( 0., synchronization=variables_lib.VariableSynchronization.ON_READ, aggregation=variables_lib.VariableAggregation.SUM) self.evaluate(variables_lib.global_variables_initializer()) with self.assertRaisesRegex( ValueError, "SyncOnReadVariable does not support "): self.evaluate(v.assign_add(1.)) with self.assertRaisesRegex( ValueError, "SyncOnReadVariable does not support "): self.evaluate(v.assign_sub(1.)) def testReadValueInReplicaContext(self, distribution): aggregations = [ variables_lib.VariableAggregation.NONE, variables_lib.VariableAggregation.SUM, variables_lib.VariableAggregation.MEAN, variables_lib.VariableAggregation.ONLY_FIRST_REPLICA, ] for aggregation in aggregations: with distribution.scope(): v = variable_scope.variable( 0., synchronization=variables_lib.VariableSynchronization.ON_READ, aggregation=aggregation) self.evaluate(variables_lib.global_variables_initializer()) results = self.evaluate(distribution.experimental_local_results( distribution.experimental_run_v2(v.read_value))) for component, value in zip(v._values, results): self.assertAllEqual(self.evaluate(component.read_value()), value) def testReadValueInCrossReplicaContext(self, distribution): aggregations = [ variables_lib.VariableAggregation.SUM, variables_lib.VariableAggregation.MEAN, variables_lib.VariableAggregation.ONLY_FIRST_REPLICA, ] for aggregation in aggregations: with distribution.scope(): v = variable_scope.variable( 0., synchronization=variables_lib.VariableSynchronization.ON_READ, aggregation=aggregation) self.evaluate(variables_lib.global_variables_initializer()) def assign(v=v): ctx = distribution_strategy_context.get_replica_context() replica_id = ctx.replica_id_in_sync_group return v.assign(math_ops.cast(replica_id, dtypes.float32)) self.evaluate(distribution.experimental_local_results( distribution.experimental_run_v2(assign))) result = self.evaluate(v.read_value()) num_replicas = distribution.num_replicas_in_sync sum_of_replica_values = num_replicas * (num_replicas - 1) / 2. 
      if aggregation == variables_lib.VariableAggregation.SUM:
        expected = sum_of_replica_values
      elif aggregation == variables_lib.VariableAggregation.MEAN:
        expected = sum_of_replica_values / num_replicas
      else:
        expected = 0
      self.assertEqual(expected, result, aggregation)

  def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):
    with distribution.scope():
      v = variable_scope.variable(
          0.,
          synchronization=variables_lib.VariableSynchronization.ON_READ,
          aggregation=variables_lib.VariableAggregation.NONE)
    self.evaluate(variables_lib.global_variables_initializer())
    with self.assertRaisesRegex(
        ValueError, "Could not convert from .* VariableAggregation\\.NONE"):
      self.evaluate(v.read_value())


class PerReplicaTest(test.TestCase, parameterized.TestCase):

  def testTypeSpec(self):
    device_map = values.SingleDeviceMap("CPU")
    vals = (constant_op.constant(1.),)
    per_replica = values.PerReplica(device_map, vals)

    spec = per_replica._type_spec
    self.assertEqual(spec._value_specs,
                     (tensor_spec.TensorSpec([], dtypes.float32),))
    self.assertEqual(spec._device_map, per_replica.device_map)
    self.assertEqual(spec._logical_device, per_replica.logical_device)

  def testTypeSpecRoundTrip(self):
    device_map = values.SingleDeviceMap("CPU")
    vals = (constant_op.constant(1.),)
    per_replica = values.PerReplica(device_map, vals)

    spec = per_replica._type_spec
    tensor_list = spec._to_components(per_replica)
    reconstructed = spec._from_components(tensor_list)

    self.assertEqual(per_replica.device_map, reconstructed.device_map)
    self.assertEqual(per_replica.logical_device, reconstructed.logical_device)
    self.assertAllEqual(per_replica.values, reconstructed.values)

  def testTypeSpecNest(self):
    device_map = values.ReplicaDeviceMap(["CPU:0", "CPU:1"])
    vals = (constant_op.constant(1.), constant_op.constant([5., 6.0]),)
    per_replica = values.PerReplica(device_map, vals)

    # Note: nest.map_structure exercises nest.flatten and
    # nest.pack_sequence_as.
    result = nest.map_structure(lambda t: t + 10, per_replica,
                                expand_composites=True)

    self.assertEqual(per_replica.device_map, result.device_map)
    self.assertEqual(per_replica.logical_device, result.logical_device)
    self.assertLen(result.values, 2)
    self.assertAllEqual(result.values[0], 11.)
    self.assertAllEqual(result.values[1], [15., 16.0])

  @test_util.run_in_graph_and_eager_modes
  def testIsGraphTensor(self):
    per_replica = values.PerReplica(values.SingleDeviceMap("CPU"),
                                    (constant_op.constant(1.),))
    self.assertEqual(per_replica._is_graph_tensor,
                     not context.executing_eagerly())

  def testDoesNotTriggerFunctionTracing(self):
    traces = []

    @def_function.function
    def f(x):
      traces.append(None)  # Only happens on trace.
      return x

    per_replica = values.PerReplica(
        values.SingleDeviceMap("CPU"), (constant_op.constant(1.),))

    # Trace once.
    f(per_replica)
    self.assertNotEmpty(traces)
    del traces[:]

    per_replica_spec = per_replica._type_spec
    for _ in range(5):
      vals = per_replica_spec._to_components(per_replica)
      vals = [v * 2 for v in vals]
      per_replica = per_replica_spec._from_components(vals)

      output = f(per_replica)
      self.assertIsInstance(output, values.PerReplica)
      self.assertAllEqual(output._values, per_replica._values)
      self.assertAllEqual(output._device_map, per_replica._device_map)
      self.assertAllEqual(output._logical_device, per_replica._logical_device)
      self.assertEmpty(traces)  # Make sure we're not re-tracing `f`.
def testFunctionCanReturnPerReplica(self): f = def_function.function(lambda x: x) x = values.PerReplica( values.SingleDeviceMap("CPU"), (constant_op.constant(1.),)) y = f(x) self.assertIsNot(x, y) for a, b in zip(x._to_components(), y._to_components()): self.assertAllEqual(a, b) self.assertEqual(x._component_metadata(), y._component_metadata()) @test_util.run_in_graph_and_eager_modes def testCondWithTensorValues(self): device_map = values.SingleDeviceMap("CPU") per_replica_1 = values.PerReplica(device_map, (constant_op.constant("a"),)) per_replica_2 = values.PerReplica(device_map, (constant_op.constant(["b", "c"]),)) condition = array_ops.placeholder_with_default(True, []) result = control_flow_ops.cond( condition, lambda: per_replica_1, lambda: per_replica_2) self.assertEqual(per_replica_1.device_map, result.device_map) self.assertEqual(per_replica_1.logical_device, result.logical_device) self.assertLen(result.values, 1) self.assertAllEqual(result.values[0], "a") @test_util.run_in_graph_and_eager_modes def testCondWithValuesConvertibleToTensor(self): device_map = values.SingleDeviceMap("CPU") per_replica_1 = values.PerReplica(device_map, ("a",)) per_replica_2 = values.PerReplica(device_map, ("b",)) condition = array_ops.placeholder_with_default(True, []) result = control_flow_ops.cond( condition, lambda: per_replica_1, lambda: per_replica_2) self.assertEqual(per_replica_1.device_map, result.device_map) self.assertEqual(per_replica_1.logical_device, result.logical_device) self.assertLen(result.values, 1) self.assertAllEqual(result.values[0], "a") @test_util.build_as_function_and_v1_graph def testCondWithValuesNotConvertibleToTensor(self): device_map = values.SingleDeviceMap("CPU") per_replica_1 = values.PerReplica(device_map, (set(["a"]),)) per_replica_2 = values.PerReplica(device_map, (set(["b", "c"]),)) condition = array_ops.placeholder(dtypes.bool, []) with self.assertRaisesRegex(TypeError, "Could not build a TypeSpec for"): control_flow_ops.cond( condition, lambda: per_replica_1, lambda: per_replica_2) class WorkerDeviceMapTest(test.TestCase): class ReplicaContext(object): def __init__(self, replica_id_in_sync_group): self.replica_id_in_sync_group = replica_id_in_sync_group def testBasic(self): devices = [ "/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:2/device:CPU:0" ] device_map = values.WorkerDeviceMap(devices, 1) self.assertAllEqual(devices, device_map.all_devices) # pylint:disable=pointless-statement with self.assertRaisesWithPredicateMatch( ValueError, "`WorkerDeviceMap` is not indexed by replicas"): device_map.devices_by_replica self.assertEqual(1, device_map.num_logical_devices) self.assertEqual(2, device_map.num_replicas_in_graph) self.assertEqual(0, device_map.logical_device_from_values(["a", "b"])) self.assertAllEqual(devices, device_map.logical_to_actual_devices(0)) replica_context = WorkerDeviceMapTest.ReplicaContext(1) self.assertEqual( "b", device_map.select_for_current_replica(["a", "b"], replica_context)) with self.assertRaisesWithPredicateMatch( ValueError, "`WorkerDeviceMap` not indexed by replicas"): device_map.replica_for_device(devices[1]) self.assertEqual("b", device_map.select_for_device(["a", "b"], devices[1])) with self.assertRaisesWithPredicateMatch( ValueError, "WorkerDeviceMap not indexed by replicas"): device_map.is_device_in_replica(devices[1], 1) self.assertEqual( "WorkerDeviceMap(('/job:worker/replica:0/task:0/device:CPU:0', " "'/job:worker/replica:0/task:2/device:CPU:0'), " "num_replicas_per_worker=1)", repr(device_map)) def 
testMultipleReplicasPerWorker(self): devices = [ "/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:2/device:CPU:0" ] device_map = values.WorkerDeviceMap(devices, 2) replica_context = WorkerDeviceMapTest.ReplicaContext(3) self.assertEqual( "b", device_map.select_for_current_replica(["a", "b"], replica_context)) if __name__ == "__main__": test.main()
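# A minimal pure-Python sketch of the arithmetic asserted by the
# cross-replica read tests above: replica i assigns the value i, so a
# SUM-aggregated SyncOnReadVariable reads back n * (n - 1) / 2 and a
# MEAN-aggregated one reads back (n - 1) / 2. The helper below is
# hypothetical and exists only to make that expectation explicit.
def _expected_sync_on_read(num_replicas, aggregation):
  total = num_replicas * (num_replicas - 1) / 2.  # Sum of replica ids 0..n-1.
  if aggregation == "SUM":
    return total
  if aggregation == "MEAN":
    return total / num_replicas
  return 0.  # ONLY_FIRST_REPLICA: replica 0 assigned 0.

assert _expected_sync_on_read(2, "SUM") == 1.
assert _expected_sync_on_read(4, "MEAN") == 1.5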
tensorflow-master
tensorflow/python/distribute/values_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for checkpoint_utils.init_from_checkpoint with Distribution Strategy. These tests are located here instead of as part of `python.training.CheckpointsTest` because they need access to distribution strategies which are only present in contrib right now. TODO(priyag): Move the tests to core `python.training.CheckpointsTest` when distribution strategy moves out of contrib. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.framework import ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import saver as saver_lib def _create_checkpoints(sess, checkpoint_dir): checkpoint_prefix = os.path.join(checkpoint_dir, "model") checkpoint_state_name = "checkpoint" v1 = variable_scope.get_variable("var1", [1, 10]) v2 = variable_scope.get_variable("var2", [10, 10]) sess.run(variables.global_variables_initializer()) v1_value, v2_value = sess.run([v1, v2]) saver = saver_lib.Saver() saver.save( sess, checkpoint_prefix, global_step=0, latest_filename=checkpoint_state_name) return v1_value, v2_value class CheckpointUtilsWithDistributionStrategyTest( test.TestCase, parameterized.TestCase): def _get_test_object(self): checkpoint_dir = self.get_temp_dir() with self.cached_session() as session: v1, v2 = _create_checkpoints(session, checkpoint_dir) return checkpoint_dir, v1, v2 @combinations.generate( combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ], in_replica_mode=[True, False], mode=["graph"])) def testInitFromCheckpoint(self, distribution, in_replica_mode): checkpoint_dir, v1_value, v2_value = self._get_test_object() def init_and_verify(g): v1 = variable_scope.get_variable("new_var1", [1, 10]) v2 = variable_scope.get_variable( "new_var2", [10, 10], synchronization=variable_scope.VariableSynchronization.ON_READ, aggregation=variable_scope.VariableAggregation.MEAN) checkpoint_utils.init_from_checkpoint(checkpoint_dir, { "var1": "new_var1", "var2": "new_var2" }) with self.session(graph=g) as session: session.run(variables.global_variables_initializer()) self.assertAllEqual(v1_value, self.evaluate(v1)) self.assertAllEqual(v2_value, self.evaluate(v2)) with ops.Graph().as_default() as g, distribution.scope(): if in_replica_mode: distribution.extended.call_for_each_replica(init_and_verify, args=[g]) else: init_and_verify(g) 
@combinations.generate( combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ], in_replica_mode=[True, False], mode=["graph"])) def testInitFromDifferentNameObject(self, distribution, in_replica_mode): checkpoint_dir, v1_value, _ = self._get_test_object() def init_and_verify(g): v1 = variable_scope.get_variable("new_var1", [1, 10]) # Use string add to create new object in each replica prefix = "new_" suffix = "var1" new_var1 = prefix + suffix checkpoint_utils.init_from_checkpoint(checkpoint_dir, { "var1": new_var1, }) with self.test_session(graph=g) as session: session.run(variables.global_variables_initializer()) self.assertAllEqual(v1_value, self.evaluate(v1)) with ops.Graph().as_default() as g, distribution.scope(): if in_replica_mode: distribution.extended.call_for_each_replica(init_and_verify, [g]) else: init_and_verify(g) if __name__ == "__main__": test.main()
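# A minimal sketch of the pattern exercised above, stripped of the
# distribution strategy: map checkpointed names to freshly created variables,
# then run the global initializer, which then restores values from the
# checkpoint. The helper is hypothetical and assumes `checkpoint_dir` already
# contains a checkpoint holding a variable named "var1" of shape [1, 10].
def _example_init_from_checkpoint(checkpoint_dir, graph):
  with graph.as_default():
    new_var1 = variable_scope.get_variable("new_var1", [1, 10])
    checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                          {"var1": "new_var1"})
    return new_var1  # Takes the value of "var1" once the initializer runs.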
tensorflow-master
tensorflow/python/distribute/checkpoint_utils_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class MirroredStrategy implementing DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import copy import threading from tensorflow.python import pywrap_tensorflow from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import input_lib from tensorflow.python.distribute import multi_worker_util from tensorflow.python.distribute import numpy_dataset from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import shared_variable_creator from tensorflow.python.distribute import values from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver from tensorflow.python.eager import context from tensorflow.python.eager import tape from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variable_scope from tensorflow.python.training import coordinator from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export # TODO(josh11b): Replace asserts in this file with if ...: raise ... @contextlib.contextmanager def _enter_graph(g, eager, creator_stack=None): """Context manager for selecting a graph and maybe eager mode.""" if eager: with g.as_default(), context.eager_mode(): if creator_stack is not None: g._variable_creator_stack = creator_stack # pylint: disable=protected-access yield else: with g.as_default(): if creator_stack is not None: g._variable_creator_stack = creator_stack # pylint: disable=protected-access yield def _cpu_device(device): cpu_device = tf_device.DeviceSpec.from_string(device) cpu_device = cpu_device.replace(device_type="CPU", device_index=0) return cpu_device.to_string() class _RequestedStop(Exception): # pylint: disable=g-bad-exception-name pass # _call_for_each_replica is not a member of MirroredStrategy so that it is # not allowed to use anything specific to MirroredStrategy and thus # can be shared with other distribution strategies. # TODO(yuefengz): maybe create a common class for those who need to call this # _call_for_each_replica. def _call_for_each_replica(distribution, device_map, fn, args, kwargs): """Run `fn` in separate threads, once per replica/worker device. Args: distribution: the DistributionStrategy object. device_map: the DeviceMap with the devices to run `fn` on. 
fn: function to run (will be run once per replica, each in its own thread). args: positional arguments for `fn` kwargs: keyword arguments for `fn`. Returns: Merged return value of `fn` across all replicas. Raises: RuntimeError: If fn() calls get_replica_context().merge_call() a different number of times from the available devices. """ # TODO(josh11b): Add this option once we add synchronization to variable # creation. Until then, this is pretty unsafe to use. run_concurrently = False if not context.executing_eagerly(): # Needed for per-thread device, etc. contexts in graph mode. ops.get_default_graph().switch_to_thread_local() coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,)) shared_variable_store = {} # TODO(isaprykin): Create these threads once instead of during every call. threads = [] for index in range(device_map.num_replicas_in_graph): variable_creator_fn = shared_variable_creator.make_fn( shared_variable_store, index) t = _MirroredReplicaThread( distribution, coord, index, device_map, variable_creator_fn, fn, values.select_replica(index, args), values.select_replica(index, kwargs)) threads.append(t) for t in threads: t.start() # When `fn` starts `should_run` event is set on _MirroredReplicaThread # (`MRT`) threads. The execution waits until # `MRT.has_paused` is set, which indicates that either `fn` is # complete or a `get_replica_context().merge_call()` is called. If `fn` is # complete, then `MRT.done` is set to True. Otherwise, arguments # of `get_replica_context().merge_call` from all paused threads are grouped # and the `merge_fn` is performed. Results of the # `get_replica_context().merge_call` are then set to `MRT.merge_result`. # Each such `get_replica_context().merge_call` call returns the # `MRT.merge_result` for that thread when `MRT.should_run` event # is reset again. Execution of `fn` resumes. try: with coord.stop_on_exception(): all_done = False while not all_done and not coord.should_stop(): done = [] if run_concurrently: for t in threads: t.should_run.set() for t in threads: t.has_paused.wait() t.has_paused.clear() if coord.should_stop(): return None done.append(t.done) else: for t in threads: t.should_run.set() t.has_paused.wait() t.has_paused.clear() if coord.should_stop(): return None done.append(t.done) if coord.should_stop(): return None all_done = all(done) if not all_done: if any(done): raise RuntimeError("Some replicas made a different number of " "replica_context().merge_call() calls.") # get_replica_context().merge_call() case merge_args = values.regroup( device_map, tuple(t.merge_args for t in threads)) merge_kwargs = values.regroup( device_map, tuple(t.merge_kwargs for t in threads)) # We capture the name_scope of the MRT when we call merge_fn # to ensure that if we have opened a name scope in the MRT, # it will be respected when executing the merge function. We only # capture the name_scope from the first MRT and assume it is # the same for all other MRTs. mtt_captured_name_scope = threads[0].captured_name_scope mtt_captured_var_scope = threads[0].captured_var_scope # Capture and merge the control dependencies from all the threads. 
mtt_captured_control_deps = set() for t in threads: mtt_captured_control_deps.update(t.captured_control_deps) with ops.name_scope(mtt_captured_name_scope),\ ops.control_dependencies(mtt_captured_control_deps), \ variable_scope.variable_scope(mtt_captured_var_scope): merge_result = threads[0].merge_fn(distribution, *merge_args, **merge_kwargs) for r, t in enumerate(threads): t.merge_result = values.select_replica(r, merge_result) finally: for t in threads: t.should_run.set() coord.join(threads) return values.regroup(device_map, tuple(t.main_result for t in threads)) def _create_mirrored_variable(strategy, device_map, logical_device, # pylint: disable=missing-docstring real_mirrored_creator, *args, **kwargs): # Figure out what collections this variable should be added to. # We'll add the MirroredVariable to those collections instead. collections = kwargs.pop("collections", None) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] kwargs["collections"] = [] # Get synchronization value synchronization = kwargs.get("synchronization", variable_scope.VariableSynchronization.ON_WRITE) if synchronization == variable_scope.VariableSynchronization.NONE: raise ValueError("`NONE` variable synchronization mode is not " "supported with `Mirrored` distribution strategy. Please" " change the `synchronization` for variable: " + kwargs["name"]) elif synchronization == variable_scope.VariableSynchronization.ON_READ: # Variables that are to be synced on read are replica local. is_sync_on_read = True elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or synchronization == variable_scope.VariableSynchronization.AUTO): # `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`. is_sync_on_read = False else: raise ValueError( "Invalid variable synchronization mode: %s for variable: %s" % (synchronization, kwargs["name"])) # Get aggregation value aggregation = kwargs.pop("aggregation", variable_scope.VariableAggregation.NONE) if aggregation not in ( variable_scope.VariableAggregation.NONE, variable_scope.VariableAggregation.SUM, variable_scope.VariableAggregation.MEAN, variable_scope.VariableAggregation.ONLY_FIRST_REPLICA ): raise ValueError( "Invalid variable aggregation mode: %s for variable: %s" % (aggregation, kwargs["name"])) # Ignore user-specified caching device, not needed for mirrored variables. kwargs.pop("caching_device", None) # TODO(josh11b,apassos): It would be better if variable initialization # was never recorded on the tape instead of having to do this manually # here. with tape.stop_recording(): devices = device_map.logical_to_actual_devices(logical_device) value_list = real_mirrored_creator(devices, *args, **kwargs) if is_sync_on_read: result = values.SyncOnReadVariable( strategy, device_map, value_list, aggregation, logical_device=logical_device) else: result = values.MirroredVariable( strategy, device_map, value_list, aggregation, logical_device=logical_device) # Add the wrapped variable to the requested collections. # The handling of eager mode and the global step matches # ResourceVariable._init_from_args(). if not context.executing_eagerly(): g = ops.get_default_graph() # If "trainable" is True, next_creator() will add the member variables # to the TRAINABLE_VARIABLES collection, so we manually remove # them and replace with the MirroredVariable. We can't set # "trainable" to False for next_creator() since that causes functions # like implicit_gradients to skip those variables. 
if kwargs.get("trainable", True): collections.append(ops.GraphKeys.TRAINABLE_VARIABLES) l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES) for v in value_list: if v in l: l.remove(v) g.add_to_collections(collections, result) elif ops.GraphKeys.GLOBAL_STEP in collections: ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result) return result def _is_device_list_local(devices): """Checks whether the devices list is for local or multi-worker. Args: devices: a list of device strings, either local or remote devices. Returns: a boolean indicating whether these device strings are for local or for remote devices. Raises: ValueError: if device strings are not consistent. """ all_local = None for d in devices: d_spec = tf_device.DeviceSpec.from_string(d) is_local = d_spec.job in (None, "localhost") if all_local is None: # Determine all_local from first device. all_local = is_local if all_local: if not is_local: raise ValueError("Local device string cannot have job specified other " "than 'localhost'") else: if is_local: raise ValueError("Remote device string must have job specified.") if d_spec.task is None: raise ValueError("Remote device string must have task specified.") return all_local def _cluster_spec_to_device_list(cluster_spec, num_gpus_per_worker): """Returns a device list given a cluster spec.""" cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec) devices = [] for task_type in ("chief", "worker"): for task_id in range(len(cluster_spec.as_dict().get(task_type, []))): if num_gpus_per_worker == 0: devices.append("/job:%s/task:%d" % (task_type, task_id)) else: devices.extend([ "/job:%s/task:%d/device:GPU:%i" % (task_type, task_id, gpu_id) for gpu_id in range(num_gpus_per_worker) ]) return devices def _group_device_list(devices): """Groups the devices list by task_type and task_id. Args: devices: a list of device strings for remote devices. Returns: a dict of list of device strings mapping from task_type to a list of devices for the task_type in the ascending order of task_id. """ assert not _is_device_list_local(devices) device_dict = {} for d in devices: d_spec = tf_device.DeviceSpec.from_string(d) # Create an entry for the task_type. if d_spec.job not in device_dict: device_dict[d_spec.job] = [] # Fill the device list for task_type until it covers the task_id. while len(device_dict[d_spec.job]) <= d_spec.task: device_dict[d_spec.job].append([]) device_dict[d_spec.job][d_spec.task].append(d) return device_dict def _is_gpu_device(device): return tf_device.DeviceSpec.from_string(device).device_type == "GPU" def _infer_num_gpus_per_worker(devices): """Infers the number of GPUs on each worker. Currently to make multi-worker cross device ops work, we need all workers to have the same number of GPUs. Args: devices: a list of device strings, can be either local devices or remote devices. Returns: number of GPUs per worker. Raises: ValueError if workers have different numbers of GPUs or GPU indices are not consecutive and starting from 0. """ if _is_device_list_local(devices): return sum(1 for d in devices if _is_gpu_device(d)) else: device_dict = _group_device_list(devices) num_gpus = None for _, devices_in_task in device_dict.items(): for device_in_task in devices_in_task: if num_gpus is None: num_gpus = sum(1 for d in device_in_task if _is_gpu_device(d)) # Verify other workers have the same number of GPUs. 
elif num_gpus != sum(1 for d in device_in_task if _is_gpu_device(d)): raise ValueError("All workers should have the same number of GPUs.") for d in device_in_task: d_spec = tf_device.DeviceSpec.from_string(d) if (d_spec.device_type == "GPU" and d_spec.device_index >= num_gpus): raise ValueError("GPU `device_index` on a worker should be " "consecutive and start from 0.") return num_gpus def all_local_devices(num_gpus=None): if num_gpus is None: num_gpus = context.num_gpus() return device_util.local_devices_from_num_gpus(num_gpus) def _all_devices(): devices = [] tfconfig = TFConfigClusterResolver() if tfconfig.cluster_spec().as_dict(): devices = _cluster_spec_to_device_list(tfconfig.cluster_spec(), context.num_gpus()) return devices if devices else all_local_devices() @tf_export("distribute.MirroredStrategy", v1=[]) class MirroredStrategy(distribute_lib.Strategy): """Mirrors vars to distribute across multiple devices and machines. This strategy uses one replica per device and sync replication for its multi-GPU version. The multi-worker version will be added in the future. Args: devices: a list of device strings. If `None`, all available GPUs are used. If no GPUs are found, CPU is used. cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not set, NCCL will be used by default. """ def __init__(self, devices=None, cross_device_ops=None): extended = MirroredExtended( self, devices=devices, cross_device_ops=cross_device_ops) super(MirroredStrategy, self).__init__(extended) @tf_export(v1=["distribute.MirroredStrategy"]) class MirroredStrategyV1(distribute_lib.StrategyV1): __doc__ = MirroredStrategy.__doc__ def __init__(self, devices=None, cross_device_ops=None): extended = MirroredExtended( self, devices=devices, cross_device_ops=cross_device_ops) super(MirroredStrategyV1, self).__init__(extended) # TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1. class MirroredExtended(distribute_lib.StrategyExtendedV1): """Implementation of MirroredStrategy.""" def __init__(self, container_strategy, devices=None, cross_device_ops=None): super(MirroredExtended, self).__init__(container_strategy) if devices is None: devices = _all_devices() if not devices: raise ValueError("Got an empty `devices` list. Please make sure the " "`devices` you pass in is not empty.") self._cross_device_ops = cross_device_ops self._initialize_strategy(devices) def _initialize_strategy(self, devices): # The _initialize_strategy method is intended to be used by distribute # coordinator as well. if _is_device_list_local(devices): self._initialize_local(devices) else: self._initialize_multi_worker(devices) def _initialize_local(self, devices): """Initializes the object for local training.""" self._local_mode = True assert devices, "Must specify at least one device." devices = tuple(device_util.resolve(d) for d in devices) assert len(set(devices)) == len(devices), ( "No duplicates allowed in `devices` argument: %s" % (devices,)) # TODO(josh11b): Require at least 2 devices? self._device_map = values.ReplicaDeviceMap(devices) self._input_workers = input_lib.InputWorkers(self._device_map) self._inferred_cross_device_ops = cross_device_ops_lib.choose_the_best( devices) self._host_input_device = numpy_dataset.SingleDevice("/cpu:0") def _initialize_multi_worker(self, devices): """Initializes the object for multi-worker training.""" self._local_mode = False assert devices, "Must specify at least one device." 
devices = tuple(device_util.resolve(d) for d in devices) assert len(set(devices)) == len(devices), ( "No duplicates allowed in `devices` argument: %s" % devices) # TODO(josh11b): Require at least 2 devices? device_dict = _group_device_list(devices) workers = [] worker_devices = [] for job in ("chief", "worker"): for task in range(len(device_dict.get(job, []))): worker = "/job:%s/task:%d" % (job, task) workers.append(worker) worker_devices.append((worker, device_dict[job][task])) # Setting `_default_device` will add a device scope in the # distribution.scope. We set the default device to the first worker. When # users specify device under distribution.scope by # with tf.device("/cpu:0"): # ... # their ops will end up on the cpu device of its first worker, e.g. # "/job:worker/task:0/device:CPU:0". Note this is not used in replica mode. self._default_device = workers[0] self._host_input_device = numpy_dataset.SingleDevice(workers[0]) self._device_map = values.ReplicaDeviceMap(devices) self._input_workers = input_lib.InputWorkers( self._device_map, worker_devices) if len(workers) > 1: if not isinstance(self._cross_device_ops, cross_device_ops_lib.MultiWorkerAllReduce): raise ValueError( "In-graph multi-worker training with `MirroredStrategy` is not " "supported.") self._inferred_cross_device_ops = self._cross_device_ops else: # TODO(yuefengz): make `choose_the_best` work with device strings # containing job names. self._inferred_cross_device_ops = cross_device_ops_lib.NcclAllReduce() def _get_variable_creator_initial_value(self, replica_id=0, device=None, primary_var=None, **kwargs): """Return the initial value for variables on a replica.""" if replica_id == 0: return kwargs["initial_value"] else: assert primary_var is not None assert device is not None assert kwargs is not None def initial_value_fn(): if context.executing_eagerly() or ops.inside_function(): init_value = primary_var.value() return array_ops.identity(init_value) else: with ops.device(device): init_value = primary_var.initial_value return array_ops.identity(init_value) return initial_value_fn def _create_variable(self, next_creator, *args, **kwargs): """Create a mirrored variable. See `DistributionStrategy.scope`.""" colocate_with = kwargs.pop("colocate_with", None) if colocate_with is None: device_map = self._device_map logical_device = 0 # TODO(josh11b): Get logical device from scope here. elif isinstance(colocate_with, numpy_dataset.SingleDevice): with ops.device(colocate_with.device): return next_creator(*args, **kwargs) else: device_map = colocate_with.device_map logical_device = colocate_with.logical_device def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring value_list = [] for i, d in enumerate(devices): with ops.device(d): kwargs["initial_value"] = self._get_variable_creator_initial_value( replica_id=i, device=d, primary_var=value_list[0] if value_list else None, **kwargs) if i > 0: # Give replicas meaningful distinct names: var0name = value_list[0].name.split(":")[0] # We append a / to variable names created on replicas with id > 0 to # ensure that we ignore the name scope and instead use the given # name as the absolute name of the variable. kwargs["name"] = "%s/replica_%d/" % (var0name, i) with context.device_policy(context.DEVICE_PLACEMENT_SILENT): # Don't record operations (e.g. other variable reads) during # variable creation. 
with tape.stop_recording(): v = next_creator(*args, **kwargs) assert not isinstance(v, values.DistributedVariable) value_list.append(v) return value_list return _create_mirrored_variable( self._container_strategy(), device_map, logical_device, _real_mirrored_creator, *args, **kwargs) def _validate_colocate_with_variable(self, colocate_with_variable): values.validate_colocate_distributed_variable(colocate_with_variable, self) def _make_dataset_iterator(self, dataset): return input_lib.DatasetIterator( dataset, self._input_workers, self._container_strategy(), split_batch_by=self._num_replicas_in_sync) def _make_input_fn_iterator( self, input_fn, replication_mode=distribute_lib.InputReplicationMode.PER_WORKER): input_contexts = [] num_workers = self._input_workers.num_workers for i in range(num_workers): input_contexts.append(distribute_lib.InputContext( num_input_pipelines=num_workers, input_pipeline_id=i, num_replicas_in_sync=self._num_replicas_in_sync)) return input_lib.InputFunctionIterator(input_fn, self._input_workers, input_contexts, self._container_strategy()) def _experimental_distribute_dataset(self, dataset): return input_lib.get_distributed_dataset( dataset, self._input_workers, self._container_strategy(), split_batch_by=self._num_replicas_in_sync) def _experimental_make_numpy_dataset(self, numpy_input, session): return numpy_dataset.one_host_numpy_dataset( numpy_input, self._host_input_device, session) def _experimental_distribute_datasets_from_function(self, dataset_fn): input_contexts = [] num_workers = self._input_workers.num_workers for i in range(num_workers): input_contexts.append(distribute_lib.InputContext( num_input_pipelines=num_workers, input_pipeline_id=i, num_replicas_in_sync=self._num_replicas_in_sync)) return input_lib.DistributedDatasetsFromFunction( dataset_fn, self._input_workers, input_contexts, self._container_strategy()) # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed. def _experimental_run_steps_on_iterator(self, fn, iterator, iterations, initial_loop_values=None): if initial_loop_values is None: initial_loop_values = {} initial_loop_values = nest.flatten(initial_loop_values) ctx = input_lib.MultiStepContext() def body(i, *args): """A wrapper around `fn` to create the while loop body.""" del args fn_result = fn(ctx, iterator.get_next()) for (name, output) in ctx.last_step_outputs.items(): # Convert all outputs to tensors, potentially from `DistributedValues`. ctx.last_step_outputs[name] = self._local_results(output) flat_last_step_outputs = nest.flatten(ctx.last_step_outputs) with ops.control_dependencies([fn_result]): return [i + 1] + flat_last_step_outputs # We capture the control_flow_context at this point, before we run `fn` # inside a while_loop. This is useful in cases where we might need to exit # these contexts and get back to the outer context to do some things, for # e.g. create an op which should be evaluated only once at the end of the # loop on the host. One such usage is in creating metrics' value op. 
self._outer_control_flow_context = ( ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access cond = lambda i, *args: i < iterations i = constant_op.constant(0) loop_result = control_flow_ops.while_loop( cond, body, [i] + initial_loop_values, name="", parallel_iterations=1, back_prop=False, swap_memory=False, return_same_structure=True) del self._outer_control_flow_context ctx.run_op = control_flow_ops.group(loop_result) # Convert the last_step_outputs from a list to the original dict structure # of last_step_outputs. last_step_tensor_outputs = loop_result[1:] last_step_tensor_outputs_dict = nest.pack_sequence_as( ctx.last_step_outputs, last_step_tensor_outputs) for name, reduce_op in ctx._last_step_outputs_reduce_ops.items(): # pylint: disable=protected-access output = last_step_tensor_outputs_dict[name] # For outputs that have already been reduced, wrap them in a Mirrored # container, else in a PerReplica container. if reduce_op is None: last_step_tensor_outputs_dict[name] = values.regroup(self._device_map, output) else: assert len(output) == 1 last_step_tensor_outputs_dict[name] = output[0] ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access return ctx def _broadcast_to(self, tensor, destinations): # This is both a fast path for Python constants, and a way to delay # converting Python values to a tensor until we know what type it # should be converted to. Otherwise we have trouble with: # global_step.assign_add(1) # since the `1` gets broadcast as an int32 but global_step is int64. if isinstance(tensor, (float, int)): return tensor # TODO(josh11b): In eager mode, use one thread per device, or async mode. if not destinations: # TODO(josh11b): Use current logical device instead of 0 here. destinations = values.LogicalDeviceSpec( device_map=self._device_map, logical_device=0) return self._get_cross_device_ops().broadcast(tensor, destinations) def _call_for_each_replica(self, fn, args, kwargs): return _call_for_each_replica(self._container_strategy(), self._device_map, fn, args, kwargs) def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): del task_type, task_id if session_config: session_config.CopyFrom(self._update_config_proto(session_config)) if cluster_spec: # TODO(yuefengz): remove the following code once cluster_resolver is # added. num_gpus_per_worker = _infer_num_gpus_per_worker( self._device_map.all_devices) multi_worker_devices = _cluster_spec_to_device_list( cluster_spec, num_gpus_per_worker) self._initialize_multi_worker(multi_worker_devices) def _update_config_proto(self, config_proto): updated_config = copy.deepcopy(config_proto) updated_config.isolate_session_state = True return updated_config def _get_cross_device_ops(self): return self._cross_device_ops or self._inferred_cross_device_ops def _reduce_to(self, reduce_op, value, destinations): if (isinstance(value, values.Mirrored) and reduce_op == reduce_util.ReduceOp.MEAN): return value assert not isinstance(value, values.Mirrored) if not isinstance(value, values.DistributedValues): # This function handles reducing values that are not PerReplica or # Mirrored values. For example, the same value could be present on all # replicas in which case `value` would be a single value or value could # be 0. 
return cross_device_ops_lib.reduce_non_distributed_value( reduce_op, self._device_map, value, destinations) return self._get_cross_device_ops().reduce( reduce_op, value, destinations=destinations) def _batch_reduce_to(self, reduce_op, value_destination_pairs): return self._get_cross_device_ops().batch_reduce(reduce_op, value_destination_pairs) def _update(self, var, fn, args, kwargs, group): # TODO(josh11b): In eager mode, use one thread per device. assert isinstance(var, values.DistributedVariable) updates = [] for i, (d, v) in enumerate(zip(var.devices, var.values)): name = "update_%d" % i with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name): # If args and kwargs are not mirrored, the value is returned as is. updates.append(fn(v, *values.select_device_mirrored(d, args), **values.select_device_mirrored(d, kwargs))) return values.update_regroup(self, self._device_map, updates, group) def _update_non_slot(self, colocate_with, fn, args, kwargs, group): assert isinstance(colocate_with, tuple) # TODO(josh11b): In eager mode, use one thread per device. updates = [] for i, d in enumerate(colocate_with): name = "update_%d" % i with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name): updates.append(fn(*values.select_device_mirrored(d, args), **values.select_device_mirrored(d, kwargs))) return values.update_regroup(self, self._device_map, updates, group) def read_var(self, replica_local_var): """Read the aggregate value of a replica-local variable.""" if isinstance(replica_local_var, values.SyncOnReadVariable): return replica_local_var._get_cross_replica() # pylint: disable=protected-access assert isinstance(replica_local_var, values.Mirrored) return array_ops.identity(replica_local_var.get()) def _local_results(self, val): if isinstance(val, values.DistributedValues): return val.values return (val,) def value_container(self, val): return values.value_container(val) @property def _num_replicas_in_sync(self): return self._device_map.num_replicas_in_graph @property def worker_devices(self): return self._device_map.all_devices @property def worker_devices_by_replica(self): return self._device_map.devices_by_replica @property def parameter_devices(self): return self._device_map.all_devices @property def experimental_between_graph(self): return False @property def experimental_should_init(self): return True @property def should_checkpoint(self): return True @property def should_save_summary(self): return True def non_slot_devices(self, var_list): del var_list # TODO(josh11b): Should this be the last logical device instead? return self._device_map.logical_to_actual_devices(0) # TODO(priyag): Delete this once all strategies use global batch size. @property def _global_batch_size(self): """`make_dataset_iterator` and `make_numpy_iterator` use global batch size. `make_input_fn_iterator` assumes per-replica batching. Returns: Boolean. """ return True class _MirroredReplicaThread(threading.Thread): """A thread that runs() a function on a device.""" def __init__(self, dist, coord, replica_id, device_map, variable_creator_fn, fn, args, kwargs): super(_MirroredReplicaThread, self).__init__() self.coord = coord self.distribution = dist self.device_map = device_map self.replica_id = replica_id self.variable_creator_fn = variable_creator_fn # State needed to run and return the results of `fn`. 
self.main_fn = fn self.main_args = args self.main_kwargs = kwargs self.main_result = None self.done = False # State needed to run the next merge_call() (if any) requested via # ReplicaContext. self.merge_fn = None self.merge_args = None self.merge_kwargs = None self.merge_result = None self.captured_name_scope = None self.captured_var_scope = None # We use a thread.Event for the main thread to signal when this # thread should start running (`should_run`), and another for # this thread to transfer control back to the main thread # (`has_paused`, either when it gets to a # `get_replica_context().merge_call` or when `fn` returns). In # either case the event starts cleared, is signaled by calling # set(). The receiving thread waits for the signal by calling # wait() and then immediately clearing the event using clear(). self.should_run = threading.Event() self.has_paused = threading.Event() # These fields have to do with inheriting various contexts from the # parent thread: context.ensure_initialized() ctx = context.context() self.in_eager = ctx.executing_eagerly() self.record_thread_local_context_fields() self.context_device_policy = ( pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy( ctx._context_handle)) self.graph = ops.get_default_graph() with ops.init_scope(): self._init_in_eager = context.executing_eagerly() self._init_graph = ops.get_default_graph() self._variable_creator_stack = self.graph._variable_creator_stack[:] self._var_scope = variable_scope.get_variable_scope() # Adding a "/" at end lets us re-enter this scope later. self._name_scope = self.graph.get_name_scope() if self._name_scope: self._name_scope += "/" if self.replica_id > 0: if not self._name_scope: self._name_scope = "" self._name_scope += "replica_%d/" % self.replica_id def run(self): self.should_run.wait() self.should_run.clear() try: if self.coord.should_stop(): return self.restore_thread_local_context_fields() # TODO(josh11b): Use current logical device instead of 0 here. with self.coord.stop_on_exception(), \ _enter_graph(self._init_graph, self._init_in_eager), \ _enter_graph(self.graph, self.in_eager, self._variable_creator_stack), \ context.device_policy(self.context_device_policy), \ MirroredReplicaContext(self.distribution, constant_op.constant( self.replica_id, dtypes.int32)), \ ops.device(self.device_map.logical_to_actual_devices(0)[ self.replica_id]), \ ops.name_scope(self._name_scope), \ variable_scope.variable_scope( self._var_scope, reuse=self.replica_id > 0), \ variable_scope.variable_creator_scope(self.variable_creator_fn): self.main_result = self.main_fn(*self.main_args, **self.main_kwargs) self.done = True finally: self.has_paused.set() def record_thread_local_context_fields(self): """Record thread local fields of context.context() in self.""" ctx = context.context() self._summary_step = ctx.summary_step self._summary_writer = ctx.summary_writer self._summary_recording = ctx.summary_recording self._summary_recording_distribution_strategy = ( ctx.summary_recording_distribution_strategy) # TODO(b/125892694): record other fields in EagerContext. def restore_thread_local_context_fields(self): """Restore thread local fields of context.context() from self.""" ctx = context.context() ctx.summary_step = self._summary_step ctx.summary_writer = self._summary_writer ctx.summary_recording = self._summary_recording ctx.summary_recording_distribution_strategy = ( self._summary_recording_distribution_strategy) # TODO(b/125892694): restore other fields in EagerContext. 
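# A minimal, self-contained sketch of the should_run / has_paused handshake
# described above, independent of the strategy machinery; it relies on this
# module's `threading` import, and the worker body is hypothetical, standing
# in for running `fn` or pausing at a merge_call.
def _handshake_demo():
  should_run = threading.Event()
  has_paused = threading.Event()

  def replica():
    should_run.wait()   # Wait for the main thread's signal.
    should_run.clear()
    # ... per-replica work would run here, then control is handed back:
    has_paused.set()

  t = threading.Thread(target=replica)
  t.start()
  should_run.set()      # Main thread: let the replica thread run.
  has_paused.wait()     # Main thread: wait until it pauses or finishes.
  has_paused.clear()
  t.join()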
class MirroredReplicaContext(distribute_lib.ReplicaContext): """ReplicaContext used in MirroredStrategy.extended.call_for_each_replica(). Opened in `_MirroredReplicaThread`, to allow the user to invoke `MirroredStrategy`'s specific implementation of `merge_call()`, which works by delegating the function and its arguments to the main thread (the one that invoked `MirroredStrategy.extended.call_for_each_replica()`). """ def _merge_call(self, fn, args, kwargs): """Delegate to the main thread to actually perform merge_call().""" t = threading.current_thread() # a _MirroredReplicaThread t.merge_fn = fn t.merge_args = args t.merge_kwargs = kwargs t.captured_name_scope = t.graph.get_name_scope() # Adding a "/" at end lets us re-enter this scope later. if t.captured_name_scope: t.captured_name_scope += "/" t.captured_var_scope = variable_scope.get_variable_scope() t.captured_control_deps = t.graph._current_control_dependencies() # pylint: disable=protected-access # NOTE(priyag): Throw an error if there is a merge call in the middle of a # `fn` passed to call_for_each_replica which changes the graph being used # while calling `fn`. This can happen when the `fn` is decorated with # `tf.function` and there is a merge_call in `fn`. This breaks because each # thread tries to create a distinct tf.function. Each tf.function creation # takes a lock, and so if there is a merge call in the middle, the lock is # never released and subsequent replica threads cannot proceed to define # their own functions. Checking for the graph being the same is one way for # us to check this didn't happen. if ops.get_default_graph() != t.graph: raise RuntimeError( "`merge_call` called while defining a new graph or a tf.function. " "This can often happen if the function `fn` passed to " "`strategy.experimental_run()` is decorated with " "`@tf.function` (or contains a nested `@tf.function`), and `fn` " "contains a synchronization point, such as aggregating gradients. " "This behavior is not yet supported. Instead, please wrap the entire " "call `strategy.experimental_run(fn)` in a `@tf.function`, and avoid " "nested `tf.function`s that may potentially cross a synchronization " "boundary.") t.has_paused.set() t.should_run.wait() t.should_run.clear() if t.coord.should_stop(): raise _RequestedStop() return t.merge_result @property def devices(self): distribute_lib.require_replica_context(self) replica_id = tensor_util.constant_value(self._replica_id_in_sync_group) return [self._strategy.extended.worker_devices_by_replica[replica_id]]
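# A minimal usage sketch for the strategy defined in this module; the device
# list and variable name are illustrative only and the helper is hypothetical,
# never called from this file.
def _example_mirrored_usage():
  strategy = MirroredStrategy(devices=["/cpu:0"])
  with strategy.scope():
    # Variable creation inside the scope goes through _create_variable and
    # yields a MirroredVariable with one component per device.
    v = variable_scope.variable(1.0, name="example_var")
  return strategy, v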
tensorflow-master
tensorflow/python/distribute/mirrored_strategy.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SharedVariableCreator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import shared_variable_creator from tensorflow.python.eager import test from tensorflow.python.framework import test_util from tensorflow.python.ops import variable_scope class CanonicalizeVariableNameTest(test.TestCase): def _canonicalize(self, name): return shared_variable_creator._canonicalize_variable_name(name) def testNoName(self): self.assertEqual("Variable", self._canonicalize(None)) def testPatternInMiddle(self): self.assertEqual("foo/bar/baz", self._canonicalize("foo_1/bar_1/baz")) def testPatternAtEnd(self): self.assertEqual("foo", self._canonicalize("foo_1")) def testWrongPatterns(self): self.assertEqual("foo_1:0", self._canonicalize("foo_1:0")) self.assertEqual("foo1", self._canonicalize("foo1")) self.assertEqual("foo_a", self._canonicalize("foo_a")) class SharedVariableCreatorTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testSharedVariable(self): shared_variable_store = {} num_devices = 3 creator_fns = [] for i in range(num_devices): creator_fn = shared_variable_creator.make_fn(shared_variable_store, i) creator_fns.append(creator_fn) with variable_scope.variable_creator_scope(creator_fns[0]): v0 = variable_scope.variable(1.0, name="foo") with variable_scope.variable_creator_scope(creator_fns[1]): v1 = variable_scope.variable(1.0, name="foo") with variable_scope.variable_creator_scope(creator_fns[2]): v2 = variable_scope.variable(1.0, name="foo") # v1 and v2 should be the same as v0. self.assertIs(v1, v0) self.assertIs(v2, v0) if __name__ == "__main__": test.main()
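# A standalone re-implementation sketch of the canonicalization behavior the
# tests above assert; the real helper lives in shared_variable_creator.py,
# and the regex here is an assumption chosen to reproduce exactly those cases.
import re

_UNIQUIFYING_SUFFIX = re.compile(r"_\d+(?=/|$)")  # "_N" before "/" or end.

def _canonicalize_sketch(name):
  if name is None:
    return "Variable"
  return _UNIQUIFYING_SUFFIX.sub("", name)

assert _canonicalize_sketch("foo_1/bar_1/baz") == "foo/bar/baz"
assert _canonicalize_sketch("foo_1") == "foo"
assert _canonicalize_sketch("foo_1:0") == "foo_1:0"  # ":0" blocks the match.
assert _canonicalize_sketch(None) == "Variable"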
tensorflow-master
tensorflow/python/distribute/shared_variable_creator_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Distribution Strategy library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.distribute import cluster_resolver from tensorflow.python.distribute import cross_device_ops from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import one_device_strategy from tensorflow.python.distribute.experimental import collective_all_reduce_strategy from tensorflow.python.distribute.experimental import parameter_server_strategy # pylint: enable=unused-import
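# A minimal sketch of selecting a strategy from this package; the helper and
# its heuristic are hypothetical, for illustration only.
def _example_pick_strategy(num_gpus):
  if num_gpus > 1:
    return mirrored_strategy.MirroredStrategy()       # One replica per GPU.
  return one_device_strategy.OneDeviceStrategy("/cpu:0")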
tensorflow-master
tensorflow/python/distribute/__init__.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for device utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import device_util from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test class DeviceUtilTest(test.TestCase): @test_util.run_deprecated_v1 def testCurrentDeviceWithGlobalGraph(self): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/device:CPU:0") with ops.device("/job:worker"): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/job:worker/device:CPU:0") with ops.device("/cpu:0"): with ops.device("/gpu:0"): self.assertEqual(device_util.current(), "/device:GPU:0") def testCurrentDeviceWithNonGlobalGraph(self): with ops.Graph().as_default(): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/device:CPU:0") def testCurrentDeviceWithEager(self): with context.eager_mode(): with ops.device("/cpu:0"): self.assertEqual(device_util.current(), "/job:localhost/replica:0/task:0/device:CPU:0") @test_util.run_deprecated_v1 def testCanonicalizeWithoutDefaultDevice(self): self.assertEqual( device_util.canonicalize("/cpu:0"), "/replica:0/task:0/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/cpu:0"), "/job:worker/replica:0/task:0/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/task:1/cpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") def testCanonicalizeWithDefaultDevice(self): self.assertEqual( device_util.canonicalize("/job:worker/task:1/cpu:0", default="/gpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") self.assertEqual( device_util.canonicalize("/job:worker/task:1", default="/gpu:0"), "/job:worker/replica:0/task:1/device:GPU:0") self.assertEqual( device_util.canonicalize("/cpu:0", default="/job:worker"), "/job:worker/replica:0/task:0/device:CPU:0") def testResolveWithDeviceScope(self): with ops.device("/gpu:0"): self.assertEqual( device_util.resolve("/job:worker/task:1/cpu:0"), "/job:worker/replica:0/task:1/device:CPU:0") self.assertEqual( device_util.resolve("/job:worker/task:1"), "/job:worker/replica:0/task:1/device:GPU:0") with ops.device("/job:worker"): self.assertEqual( device_util.resolve("/cpu:0"), "/job:worker/replica:0/task:0/device:CPU:0") if __name__ == "__main__": test.main()
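# The precedence the tests above pin down, restated as data for quick
# reference: explicit fields in the device string win over `default`, and
# replica/task fall back to 0. These tuples mirror assertions above and are
# illustrative only.
_CANONICALIZATION_EXAMPLES = [
    # (input, default, canonical result)
    ("/cpu:0", None, "/replica:0/task:0/device:CPU:0"),
    ("/job:worker/task:1", "/gpu:0",
     "/job:worker/replica:0/task:1/device:GPU:0"),
    ("/cpu:0", "/job:worker", "/job:worker/replica:0/task:0/device:CPU:0"),
]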
tensorflow-master
tensorflow/python/distribute/device_util_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for running legacy optimizer code with DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute.single_loss_example import batchnorm_example from tensorflow.python.distribute.single_loss_example import minimize_loss_example from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.keras.optimizer_v2 import optimizer_v2 from tensorflow.python.layers import core from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.ops.losses import losses_impl VAR_MAP_V1 = { "GradientDescent": ("dense/kernel", "dense/bias"), "Adagrad": ("dense/kernel/Adagrad", "dense/kernel", "dense/bias/Adagrad", "dense/bias") } VAR_MAP_V2 = { "SGD": ("dense/bias", "SGD/learning_rate", "SGD/decay", "SGD/iter", "dense/kernel", "SGD/momentum"), "Adagrad": ("Adagrad/iter", "dense/bias", "dense/kernel", "Adagrad/learning_rate", "Adagrad/decay", "Adagrad/dense/kernel/accumulator", "Adagrad/dense/bias/accumulator") } class MinimizeLossStepTest(test.TestCase, parameterized.TestCase): def _get_iterator(self, strategy, input_fn): iterator = strategy.make_input_fn_iterator(lambda _: input_fn()) self.evaluate(iterator.initialize()) return iterator @combinations.generate( combinations.times( strategy_combinations.distributions_and_v1_optimizers(), combinations.combine(mode=["graph"], use_callable_loss=[True, False]) + combinations.combine(mode=["eager"], use_callable_loss=[True])) + combinations.times( strategy_combinations.distributions_and_v2_optimizers(), combinations.combine( mode=["graph", "eager"], use_callable_loss=[True])) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v2, mode=["graph"], use_callable_loss=[True]) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1, mode=["graph"], use_callable_loss=[True, False])) def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss): with distribution.scope(): optimizer = optimizer_fn() model_fn, dataset_fn, layer = minimize_loss_example( optimizer, use_bias=True, use_callable_loss=use_callable_loss) def 
step_fn(ctx, inputs): del ctx # Unused return distribution.group( distribution.extended.call_for_each_replica( model_fn, args=(inputs,))) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): return distribution.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=2).run_op if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) weights, biases = [], [] for _ in range(5): run_step() weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(is_not_increasing) @combinations.generate( combinations.times( strategy_combinations.distributions_and_v1_optimizers(), combinations.combine(mode=["graph"], use_callable_loss=[True, False]) + combinations.combine(mode=["eager"], use_callable_loss=[True])) + combinations.times( strategy_combinations.distributions_and_v2_optimizers(), combinations.combine( mode=["graph", "eager"], use_callable_loss=[True]))) def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn, use_callable_loss): with distribution.scope(): optimizer = optimizer_fn() model_fn, dataset_fn, layer = minimize_loss_example( optimizer, use_bias=True, use_callable_loss=use_callable_loss) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): return distribution.group( distribution.extended.call_for_each_replica( model_fn, args=(iterator.get_next(),))) if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) weights, biases = [], [] for _ in range(10): run_step() weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(is_not_increasing) @combinations.generate( combinations.times( strategy_combinations.distributions_and_v1_and_v2_optimizers(), combinations.combine(mode=["graph", "eager"])) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1_and_v2, mode=["graph"])) def testOptimizerInsideModelFn(self, distribution, optimizer_fn): created_variables = [] trainable_variables = [] def appending_creator(next_creator, *args, **kwargs): v = next_creator(*args, **kwargs) created_variables.append(v.name) if "trainable" in kwargs and kwargs["trainable"]: trainable_variables.append(v.name) return v # Creator scope needs to be set before it's used inside # `distribution.scope`. 
with variable_scope.variable_creator_scope( appending_creator), distribution.scope(): optimizer = optimizer_fn() model_fn, dataset_fn, _ = minimize_loss_example( optimizer, use_bias=True, use_callable_loss=True) def step_fn(ctx, inputs): del ctx # Unused return distribution.group( distribution.extended.call_for_each_replica( model_fn, args=(inputs,))) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): return distribution.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=1).run_op if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) run_step() def get_expected_variables(num_parameter_devices): name = optimizer._name if isinstance(optimizer, optimizer_v2.OptimizerV2): variables = VAR_MAP_V2[name] else: variables = VAR_MAP_V1[name] extended_variables = [ v + "/replica_{}".format(replica) for v in variables for replica in range(1, num_parameter_devices) ] variables = list(variables) + extended_variables return set([v + ":0" for v in variables]) self.assertEqual( get_expected_variables(len(distribution.extended.parameter_devices)), set(created_variables)) @combinations.generate( combinations.times( combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]), combinations.times( strategy_combinations.distributions_and_v1_and_v2_optimizers(), combinations.combine( mode=["graph", "eager"], # TODO(isaprykin): Allow False here. Currently subsequent # replicas will re-execute UPDATE_OPS of previous replicas. update_ops_in_cross_replica_mode=[True])) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1_and_v2, mode=["graph"], update_ops_in_cross_replica_mode=[False]))) def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum, renorm, update_ops_in_cross_replica_mode): """Verifies that moving mean updates are reduced across replicas.""" with distribution.scope(): num_replicas = distribution.num_replicas_in_sync model_fn, dataset_fn, batchnorm = batchnorm_example( optimizer_fn, batch_per_epoch=num_replicas, momentum=momentum, renorm=renorm, update_ops_in_replica_mode=not update_ops_in_cross_replica_mode) def step_fn(ctx, inputs): del ctx # Unused fetches = distribution.experimental_local_results( distribution.extended.call_for_each_replica( model_fn, args=(inputs,))) if update_ops_in_cross_replica_mode: fetches += tuple(ops.get_collection(ops.GraphKeys.UPDATE_OPS)) return control_flow_ops.group(fetches) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): return distribution.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=1).run_op if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) expected_moving_means = [0.] * 8 def averaged_batch_mean(i): # Each batch has shape [16, 8] where the ith element in jth list is # (8 * j + i + replica_id * 100). So the batch mean in each replica is # (60 + i + replica_id * 100). So here comes its batch mean over all # replicas: return 60. + i + (num_replicas - 1.) / 2. * 100. for _ in range(10): run_step() moving_means = self.evaluate(batchnorm.moving_mean) # We make sure that the moving_mean is updated as if the sample mean is # calculated over all replicas. 
for i, expected_moving_mean in enumerate(expected_moving_means): expected_moving_means[i] -= (( expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum)) self.assertNear(expected_moving_means[i], moving_means[i], 0.0001) @combinations.generate( combinations.times( combinations.combine(loss_reduction=[ losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN, losses_impl.Reduction.SUM_OVER_BATCH_SIZE, losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS ]), combinations.times( combinations.combine(distribution=[ strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus ]), combinations.times( combinations.combine(optimizer_fn=strategy_combinations .gradient_descent_optimizer_v1_fn), combinations.combine( mode=["graph"], use_callable_loss=[True, False]) + combinations.combine( mode=["eager"], use_callable_loss=[True])) + combinations.times( combinations.combine(optimizer_fn=strategy_combinations .gradient_descent_optimizer_keras_v2_fn), combinations.combine( mode=["graph", "eager"], use_callable_loss=[True]))) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations .gradient_descent_optimizer_v1_fn, mode=["graph"], use_callable_loss=[True, False]) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations .gradient_descent_optimizer_keras_v2_fn, mode=["graph"], use_callable_loss=[True]))) def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction, use_callable_loss): with distribution.scope(): all_vars = [] def model_fn(inputs): x, y = inputs w = variable_scope.get_variable("w", initializer=[[2.]]) all_vars.append(w) def loss_fn(): # Use fixed initialization to make the steps deterministic. 
predict = math_ops.matmul(x, w) loss = losses_impl.mean_squared_error( y, predict, reduction=loss_reduction) if loss_reduction == losses_impl.Reduction.SUM: return loss return loss / distribution.num_replicas_in_sync optimizer = optimizer_fn() # GradientDescent with 0.2 learning rate if isinstance(optimizer, optimizer_v2.OptimizerV2): return optimizer.minimize(loss_fn, [w]) else: if use_callable_loss: return optimizer.minimize(loss_fn) else: return optimizer.minimize(loss_fn()) def dataset_fn(): features = dataset_ops.Dataset.from_tensors([[2.], [7.]]) labels = dataset_ops.Dataset.from_tensors([[6.], [21.]]) return dataset_ops.Dataset.zip((features, labels)).repeat() def step_fn(ctx, inputs): del ctx # Unused return distribution.group( distribution.extended.call_for_each_replica( model_fn, args=(inputs,))) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): return distribution.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=1).run_op if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) run_step() v = all_vars[0] self.assertTrue(all(v is vi for vi in all_vars[1:])) weight = numpy.squeeze(self.evaluate(v)) # Our model is: # predict = x * w # loss = (predict - y)^2 # dloss/dpredict = 2*(predict - y) # dloss/dw = 2 * x^T @ (predict - y) # For our batch size of 2, assuming sum loss reduction: # x = [2, 7] # y = [6, 21] # w_initial = 2 # predict = [4, 14] # predict - y = [-2, -7] # dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106 # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2 # with sum loss reduction, or 10.6 with mean. if loss_reduction == losses_impl.Reduction.SUM: # Note that the "distribution.num_replicas_in_sync" factor will go away # once we split the input across replicas, instead of pulling a complete # batch of input per replica. self.assertNear(weight, 2 + 21.2 * distribution.num_replicas_in_sync, 0.0001) else: # One of the mean loss reductions. self.assertNear(weight, 2 + 10.6, 0.0001) @combinations.generate( combinations.times( strategy_combinations.distributions_and_v1_and_v2_optimizers(), combinations.combine(mode=["graph", "eager"]), combinations.combine(is_tpu=[False])) + combinations.combine( distribution=[strategy_combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1_and_v2, mode=["graph"], is_tpu=[True])) def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu): with distribution.scope(): def dataset_fn(): dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat() # TODO(priyag): batch with drop_remainder=True causes shapes to be # fully defined for TPU. Remove this when XLA supports dynamic shapes. return dataset.batch(batch_size=1, drop_remainder=True) optimizer = optimizer_fn() layer = core.Dense(1, use_bias=True) key1 = "foo" value1 = "bar" def model_fn(output_context, x): """A very simple model written by the user.""" def loss_fn(): y = array_ops.reshape(layer(x), []) - constant_op.constant(1.) 
return y * y if isinstance(optimizer, optimizer_v2.OptimizerV2): train_op = optimizer.minimize( loss_fn, lambda: layer.trainable_variables) else: train_op = optimizer.minimize(loss_fn) loss = loss_fn() output_context.set_last_step_output( name="replica_loss_reduced", output=loss, reduce_op=reduce_util.ReduceOp.MEAN) output_context.set_non_tensor_output(key1, value1) return (train_op, loss) def step_fn(output_context, inputs): (train_op, loss) = distribution.extended.call_for_each_replica( model_fn, args=(output_context, inputs)) output_context.set_last_step_output( name="cross_replica_loss_reduced", output=loss, reduce_op=reduce_util.ReduceOp.MEAN) output_context.set_last_step_output( name="cross_replica_loss_not_reduced", output=loss) return distribution.group(train_op) iterator = self._get_iterator(distribution, dataset_fn) def run_step(): initial_loss = lambda: constant_op.constant(1e7) # Initial values corresponding to reduced losses are just single # tensors. But for non reduced losses, we need to have initial # values that are of the same structure as non reduced losses. In # MirroredStrategy, this will be a list of losses, in TPUStrategy # it will be single tensor. Using `call_for_each_replica` followed # by `experimental_local_results` gives us the desired initial # value structure. not_reduced = distribution.experimental_local_results( distribution.extended.call_for_each_replica(initial_loss)) initial_loop_values = { "replica_loss_reduced": initial_loss(), "cross_replica_loss_reduced": initial_loss(), "cross_replica_loss_not_reduced": not_reduced, } ctx = distribution.extended.experimental_run_steps_on_iterator( step_fn, iterator, iterations=2, initial_loop_values=initial_loop_values) self.assertEqual({key1: (value1,)}, ctx.non_tensor_outputs) self._verify_loss_output( initial_loss(), loss_output=ctx.last_step_outputs["replica_loss_reduced"], reduced=True, distribution=distribution) self._verify_loss_output( initial_loss(), loss_output=ctx.last_step_outputs["cross_replica_loss_reduced"], reduced=True, distribution=distribution) self._verify_loss_output( initial_loss(), loss_output=ctx.last_step_outputs["cross_replica_loss_not_reduced"], reduced=False, distribution=distribution) return (ctx.run_op, ctx.last_step_outputs["replica_loss_reduced"]) if not context.executing_eagerly(): with self.cached_session() as sess: run_step = sess.make_callable(run_step()) self.evaluate(variables_lib.global_variables_initializer()) weights, biases, losses = [], [], [] for _ in range(5): _, loss = run_step() losses.append(loss) weights.append(self.evaluate(layer.kernel)) biases.append(self.evaluate(layer.bias)) loss_is_not_increasing = all(y <= x for x, y in zip(losses, losses[1:])) self.assertTrue(loss_is_not_increasing) error = abs( numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1) error_is_not_increasing = all(y <= x for x, y in zip(error, error[1:])) self.assertTrue(error_is_not_increasing) def _verify_loss_output(self, initial_loss, loss_output, reduced, distribution): if not reduced: self.assertLen(distribution.experimental_local_results(loss_output), distribution.num_replicas_in_sync) loss_tensor = distribution.reduce(reduce_util.ReduceOp.MEAN, loss_output, axis=None) else: unwrapped_output = distribution.experimental_local_results(loss_output) self.assertLen(unwrapped_output, 1) loss_tensor = unwrapped_output[0] self.assertEqual(initial_loss.dtype, loss_tensor.dtype) self.assertEqual(initial_loss.shape, loss_tensor.shape) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/minimize_loss_test.py
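# A standalone NumPy sketch (illustrative only, not part of the test file
# above) that re-derives the gradient arithmetic worked out in the
# testMeanVsSum comments: with x = [2, 7], y = [6, 21], w = 2 and a 0.2
# learning rate, SUM loss reduction moves w by +21.2 per unreplicated step,
# while a mean-style reduction moves it by +10.6.
import numpy as np

x = np.array([[2.], [7.]])
y = np.array([[6.], [21.]])
w = np.array([[2.]])
lr = 0.2

predict = x @ w                      # [[4.], [14.]]
grad_sum = 2. * x.T @ (predict - y)  # 2 * <[2, 7], [-2, -7]> = -106
print(w - lr * grad_sum)             # [[23.2]] == 2 + 21.2 (SUM reduction)
print(w - lr * grad_sum / 2.)        # [[12.6]] == 2 + 10.6 (mean reduction)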
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the input_lib library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import threading from absl.testing import parameterized from tensorflow.python import tf2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import collective_all_reduce_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import input_lib from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import multi_worker_test_base from tensorflow.python.distribute import parameter_server_strategy from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import errors from tensorflow.python.ops import control_flow_ops from tensorflow.python.util import nest class DistributedIteratorTestBase(test.TestCase): # The passed input_context is to create a sharded dataset in between-graph # case. def _wrap_iterator(self, input_type, dataset_or_input_fn, input_workers, devices, split_batch_by, strategy, input_context=None): # The `input_context` passed in is to shard dataset for # MultiWorkerMirroredStrategy. It doesn't apply to in-graph case where # multiple InputContexts are needed. if input_type == "input_fn": self.assertIsNone( input_context, msg=("`The input_context` arg is only used to shard dataset in " "`MultiWorkerMirroredStrategy` when the input type is dataset.")) input_contexts = [] for i in range(input_workers.num_workers): input_contexts.append( distribute_lib.InputContext( # Note: `input_workers.num_workers` is always 1 in between-graph # case. 
num_input_pipelines=input_workers.num_workers, input_pipeline_id=i, num_replicas_in_sync=len(devices))) iterator = input_lib.InputFunctionIterator( dataset_or_input_fn, input_workers, input_contexts, strategy) else: iterator = input_lib.DatasetIterator( dataset_or_input_fn, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) return iterator def _wrap_dataset(self, input_type, dataset, input_workers, split_batch_by, strategy, input_context=None): if isinstance(dataset, dataset_ops.Dataset): return input_lib.DistributedDatasetV1( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) else: return input_lib.DistributedDataset( dataset, input_workers, strategy, split_batch_by=split_batch_by, input_context=input_context) def _test_input_iteration(self, input_type, api_type, iteration_type, dataset_or_input_fn, worker_device_pairs, expected_values, strategy, sess=None, split_batch_by=None, input_context=None): if iteration_type == "for_loop" and not context.executing_eagerly(): self.skipTest("unsupported test combination.") if api_type == "wrap_into_iterator" and iteration_type == "for_loop": self.skipTest("unsupported test combination.") if api_type == "wrap_into_dataset" and input_type == "input_fn": self.skipTest("unsupported test combination.") devices = nest.flatten([ds for _, ds in worker_device_pairs]) device_map = values.ReplicaDeviceMap(devices) input_workers = input_lib.InputWorkers(device_map, worker_device_pairs) if api_type == "wrap_into_iterator": iterator = self._wrap_iterator( input_type, dataset_or_input_fn, input_workers, devices, split_batch_by, strategy, input_context=input_context) else: # wrapping into a dataset: given_dataset = dataset_or_input_fn dataset = self._wrap_dataset( input_type, given_dataset, input_workers, split_batch_by, strategy, input_context=input_context) if context.executing_eagerly(): iterator = iter(dataset) else: # The dataset can be a tf.data.DatasetV1Adapter instance since we wrap # tf.data.DatasetV1 as a tf.data.DatasetV1Adapter instance when we # autoshard the dataset. if not isinstance(dataset, (dataset_ops.DatasetV1, dataset_ops.DatasetV1Adapter)): iterator = iter(dataset) else: iterator = dataset.make_one_shot_iterator() if iteration_type == "get_next": evaluate = lambda x: sess.run(x) if sess else self.evaluate(x) if isinstance(iterator, input_lib.DistributedIteratorV1): evaluate(control_flow_ops.group(iterator.initialize())) else: evaluate(control_flow_ops.group(iterator._initializer)) for expected_value in expected_values: next_element = iterator.get_next() computed_value = evaluate( [values.select_replica(r, next_element) for r in range(len(devices))]) self.assertEqual(len(expected_value), len(computed_value)) for i in range(len(expected_value)): self.assertAllEqual(expected_value[i], computed_value[i]) with self.assertRaises(errors.OutOfRangeError): next_element = iterator.get_next() evaluate( [values.select_replica(r, next_element) for r in range(len(devices))]) # After re-initializing the iterator, should be able to iterate again. 
if isinstance(iterator, input_lib.DistributedIteratorV1): evaluate(control_flow_ops.group(iterator.initialize())) else: evaluate(control_flow_ops.group(iterator._initializer)) for expected_value in expected_values: next_element = iterator.get_next() computed_value = evaluate( [values.select_replica(r, next_element) for r in range(len(devices))]) self.assertEqual(len(expected_value), len(computed_value)) for i in range(len(expected_value)): self.assertAllEqual(expected_value[i], computed_value[i]) if iteration_type == "for_loop" and context.executing_eagerly(): actual_values = [] for x in dataset: computed_value = self.evaluate( [values.select_replica(r, x) for r in range(len(devices))]) actual_values.append(computed_value) for i, expected_value in enumerate(expected_values): self.assertEqual(len(expected_value), len(actual_values[i])) for j in range(len(expected_value)): self.assertAllEqual(expected_value[j], actual_values[i][j]) def _create_dataset_or_input_fn(self, input_type, input_fn): if input_type == "input_fn": return input_fn else: return input_fn(distribute_lib.InputContext()) class DistributedIteratorSingleWorkerTest(DistributedIteratorTestBase, parameterized.TestCase): @combinations.generate( combinations.combine( mode=["graph", "eager"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], distribution=[ strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_one_cpu ], enable_get_next_as_optional=[True, False])) def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution, enable_get_next_as_optional): worker_device_pairs = [("", ["/device:CPU:0"])] if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(10) else: dataset_fn = lambda _: dataset_ops.Dataset.range(10) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) expected_values = [[i] for i in range(10)] distribution.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_device_pairs, expected_values, distribution) @combinations.generate( combinations.combine( mode=["graph", "eager"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.central_storage_strategy_with_gpu_and_cpu ], enable_get_next_as_optional=[True, False])) def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type, distribution, enable_get_next_as_optional): worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])] if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(10) else: dataset_fn = lambda _: dataset_ops.Dataset.range(10) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) expected_values = [[i, i+1] for i in range(0, 10, 2)] distribution.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_device_pairs, expected_values, distribution) @combinations.generate( combinations.combine( mode=["graph", "eager"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], distribution=[strategy_combinations.tpu_strategy], 
          enable_get_next_as_optional=[True, False]))
  def testTPU(self, input_type, api_type, iteration_type, distribution,
              enable_get_next_as_optional):
    worker_device_pairs = collections.OrderedDict()
    for tpu_device in distribution.extended._tpu_devices:
      host_device = device_util.get_host_for_device(tpu_device)
      worker_device_pairs.setdefault(host_device, [])
      worker_device_pairs[host_device].append(tpu_device)
    worker_device_pairs = worker_device_pairs.items()
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(10)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(10)
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    expected_values = [[i, i + 1] for i in range(0, 10, 2)]

    distribution.extended.experimental_enable_get_next_as_optional = (
        enable_get_next_as_optional)
    self._test_input_iteration(
        input_type, api_type, iteration_type, dataset_or_input_fn,
        worker_device_pairs, expected_values, distribution)

  @combinations.generate(
      combinations.combine(
          mode=["graph", "eager"],
          input_type=["input_fn", "dataset"],
          api_type=["wrap_into_iterator", "wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.central_storage_strategy_with_gpu_and_cpu
          ],
          enable_get_next_as_optional=[True, False]))
  def testTupleDataset(self, input_type, api_type, iteration_type,
                       distribution, enable_get_next_as_optional):
    worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]

    def dataset_fn(ctx):
      del ctx
      if tf2.enabled():
        dataset1 = dataset_ops.DatasetV2.range(10)
        dataset2 = dataset_ops.DatasetV2.range(10).map(lambda x: x**2)
        return dataset_ops.DatasetV2.zip((dataset1, dataset2))
      else:
        dataset1 = dataset_ops.Dataset.range(10)
        dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
        return dataset_ops.Dataset.zip((dataset1, dataset2))

    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    expected_values = [[(i, i**2), (i+1, (i+1)**2)] for i in range(0, 10, 2)]

    distribution.extended.experimental_enable_get_next_as_optional = (
        enable_get_next_as_optional)
    self._test_input_iteration(
        input_type, api_type, iteration_type, dataset_or_input_fn,
        worker_device_pairs, expected_values, distribution)

  @combinations.generate(
      combinations.combine(
          mode=["graph", "eager"],
          input_type=["input_fn", "dataset"],
          api_type=["wrap_into_iterator", "wrap_into_dataset"],
          iteration_type=["get_next", "for_loop"],
          distribution=[
              strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
              strategy_combinations.central_storage_strategy_with_gpu_and_cpu
          ]))
  def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,
                               distribution):
    worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])]
    if tf2.enabled():
      dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2)
    else:
      dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2)
    dataset_or_input_fn = self._create_dataset_or_input_fn(
        input_type, dataset_fn)
    # The last global batch only contains data for one replica.
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]] distribution.extended.experimental_enable_get_next_as_optional = True self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_device_pairs, expected_values, distribution) @combinations.generate( combinations.combine( mode=["graph", "eager"], input_type=["dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], split_batch_by=[None, 2], distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.central_storage_strategy_with_gpu_and_cpu ], enable_get_next_as_optional=[True, False])) def testBatchSplitting(self, input_type, api_type, iteration_type, split_batch_by, distribution, enable_get_next_as_optional): worker_device_pairs = [("", ["/device:GPU:0", "/device:CPU:0"])] batch_size = 10 if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(100).batch(batch_size) else: dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) updated_batch_size = ( batch_size // split_batch_by if split_batch_by else batch_size) expected_values = [[range(i, i+updated_batch_size), range(i+updated_batch_size, i+2*updated_batch_size)] for i in range(0, 100, updated_batch_size*2)] distribution.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_device_pairs, expected_values, distribution, sess=None, split_batch_by=split_batch_by) class DistributedIteratorMultiWorkerTest( multi_worker_test_base.MultiWorkerTestBase, DistributedIteratorTestBase, parameterized.TestCase): def _cpu_devices(self): return [ ("/job:worker/replica:0/task:0", ["/job:worker/replica:0/task:0/device:CPU:0"]), ("/job:worker/replica:0/task:1", ["/job:worker/replica:0/task:1/device:CPU:0"])] def _cpu_and_one_gpu_devices(self): return [ ("/job:worker/replica:0/task:0", [ "/job:worker/replica:0/task:0/device:GPU:0", "/job:worker/replica:0/task:0/device:CPU:0" ]), ("/job:worker/replica:0/task:1", [ "/job:worker/replica:0/task:1/device:GPU:0", "/job:worker/replica:0/task:1/device:CPU:0" ]) ] @combinations.generate(combinations.combine( mode=["graph"], input_type=["dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], autoshard=[True, False])) def testAutoshardingOption(self, input_type, api_type, iteration_type, autoshard): ds_option = dataset_ops.Options() ds_option.experimental_distribute.auto_shard = autoshard if tf2.enabled(): dataset_fn = ( lambda _: dataset_ops.DatasetV2.range(4).with_options(ds_option)) else: dataset_fn = ( lambda _: dataset_ops.Dataset.range(4).with_options(ds_option)) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 1)) worker_devices = self._cpu_devices() with context.graph_mode(), self.cached_session() as sess: if autoshard: expected_values = [[0, 1], [2, 3]] else: expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]] self._test_input_iteration(input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess) @combinations.generate( combinations.combine( mode=["graph"], 
input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], enable_get_next_as_optional=[True, False])) def testOneDevicePerWorker(self, input_type, api_type, iteration_type, enable_get_next_as_optional): if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(4) else: dataset_fn = lambda _: dataset_ops.Dataset.range(4) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 1)) worker_devices = self._cpu_devices() with context.graph_mode(), strategy.scope(), self.cached_session() as sess: if input_type == "dataset": # Autosharded expected_values = [[0, 1], [2, 3]] else: expected_values = [[0, 0], [1, 1], [2, 2], [3, 3]] strategy.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess=sess) @combinations.generate( combinations.combine( mode=["graph"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], enable_get_next_as_optional=[True, False], required_gpus=1)) def testTwoDevicesPerWorker(self, input_type, api_type, iteration_type, enable_get_next_as_optional): if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(4) else: dataset_fn = lambda _: dataset_ops.Dataset.range(4) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_and_one_gpu_devices()[0][1] + self._cpu_and_one_gpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 2)) worker_devices = self._cpu_and_one_gpu_devices() with context.graph_mode(), strategy.scope(), self.cached_session() as sess: if input_type == "dataset": # Autosharded expected_values = [[0, 2, 1, 3]] else: expected_values = [[0, 1, 0, 1], [2, 3, 2, 3]] strategy.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess=sess) @combinations.generate( combinations.combine( mode=["graph"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], enable_get_next_as_optional=[True, False])) def testTupleDataset(self, input_type, api_type, iteration_type, enable_get_next_as_optional): strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_devices()[0][1] + self._cpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 1)) worker_devices = self._cpu_devices() def dataset_fn(ctx): del ctx if tf2.enabled(): dataset1 = dataset_ops.DatasetV2.range(4) dataset2 = dataset_ops.DatasetV2.range(4).map(lambda x: x**2) return dataset_ops.DatasetV2.zip((dataset1, dataset2)) else: dataset1 = dataset_ops.Dataset.range(4) dataset2 = dataset_ops.Dataset.range(4).map(lambda x: x**2) return dataset_ops.Dataset.zip((dataset1, dataset2)) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) with context.graph_mode(), 
strategy.scope(), self.cached_session() as sess: if input_type == "dataset": # Autosharded expected_values = [[(0, 0), (1, 1)], [(2, 4), (3, 9)]] else: expected_values = [[(i, i**2), (i, i**2)] for i in range(0, 4)] strategy.extended.experimental_enable_get_next_as_optional = ( enable_get_next_as_optional) self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess=sess) @combinations.generate( combinations.combine( mode=["graph"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], required_gpus=1)) def testUnevenDatasetBatches(self, input_type, api_type, iteration_type): strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_and_one_gpu_devices()[0][1] + self._cpu_and_one_gpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 2)) if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(9).batch(2) else: dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch(2) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) worker_devices = self._cpu_and_one_gpu_devices() with context.graph_mode(), strategy.scope(), self.cached_session() as sess: if input_type == "dataset": # Autosharded expected_values = [[[0, 1], [4, 5], [2, 3], [6, 7]], [[8], [], [], []]] else: expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]], [[4, 5], [6, 7], [4, 5], [6, 7]], [[8], [], [8], []]] strategy.extended.experimental_enable_get_next_as_optional = True self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess=sess) @combinations.generate( combinations.combine( mode=["graph"], input_type=["input_fn", "dataset"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next"], strategy_cls=[ collective_all_reduce_strategy.CollectiveAllReduceStrategy, parameter_server_strategy.ParameterServerStrategy, ], required_gpus=0)) def testUnevenDatasetBatchesBetweenGraph(self, input_type, api_type, iteration_type, strategy_cls): if api_type == "wrap_into_dataset" and input_type == "input_fn": self.skipTest("unsupported test combination.") # Environment variable is global, we need locking when patching TF_CONFIG. lock = threading.Lock() def _worker_fn(task_type, task_id, num_gpus): del num_gpus tf_config = { "cluster": self._cluster_spec, "task": { "type": task_type, "index": task_id } } with context.graph_mode(), lock, test.mock.patch.dict( "os.environ", {"TF_CONFIG": json.dumps(tf_config)}): strategy = strategy_cls() with context.graph_mode(), strategy.scope(), self.cached_session( target="grpc://" + self._cluster_spec[task_type][task_id]) as sess: if tf2.enabled(): dataset_fn = lambda _: dataset_ops.DatasetV2.range(5).batch(2) else: dataset_fn = lambda _: dataset_ops.Dataset.range(5).batch(2) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) if (input_type == "dataset" and strategy_cls is collective_all_reduce_strategy.CollectiveAllReduceStrategy): # Autosharded if task_id == 0: expected_values = [[[0, 1]], [[4]]] else: expected_values = [[[2, 3]], [[]]] # input_context is for between-graph auto-sharding. 
input_context = distribute_lib.InputContext( num_input_pipelines=2, input_pipeline_id=task_id, num_replicas_in_sync=2) else: expected_values = [[[0, 1]], [[2, 3]], [[4]]] input_context = None strategy.extended.experimental_enable_get_next_as_optional = True self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, [("/job:%s/task:%d" % (task_type, task_id), strategy.extended.worker_devices)], expected_values, strategy, sess=sess, input_context=input_context) return True self._run_between_graph_clients(_worker_fn, self._cluster_spec, 0) @combinations.generate( combinations.combine( mode=["graph"], input_type=["input_fn"], api_type=["wrap_into_iterator", "wrap_into_dataset"], iteration_type=["get_next", "for_loop"], required_gpus=1)) def testDifferentDatasets(self, input_type, api_type, iteration_type): def dataset_fn(ctx): if ctx.input_pipeline_id == 0: return dataset_ops.Dataset.range(8).batch(2) else: return dataset_ops.Dataset.range(9).batch(2) dataset_or_input_fn = self._create_dataset_or_input_fn( input_type, dataset_fn) strategy = mirrored_strategy.MirroredStrategy( devices=(self._cpu_and_one_gpu_devices()[0][1] + self._cpu_and_one_gpu_devices()[1][1]), cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce( ["/job:worker/task:0", "/job:worker/task:1"], 2)) worker_devices = self._cpu_and_one_gpu_devices() with context.graph_mode(), strategy.scope(), self.cached_session() as sess: expected_values = [[[0, 1], [2, 3], [0, 1], [2, 3]], [[4, 5], [6, 7], [4, 5], [6, 7]], [[], [], [8], []]] strategy.extended.experimental_enable_get_next_as_optional = True self._test_input_iteration( input_type, api_type, iteration_type, dataset_or_input_fn, worker_devices, expected_values, strategy, sess=sess) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/input_lib_test.py
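# A plain-Python sketch (illustrative only, no TensorFlow) of the per-replica
# batch layout that the uneven-batch tests above assert on: batches are dealt
# round-robin across replicas, and a replica with no data left in the final
# step receives an empty batch.
def split_batches(elements, batch_size, num_replicas):
  batches = [elements[i:i + batch_size]
             for i in range(0, len(elements), batch_size)]
  steps = []
  for i in range(0, len(batches), num_replicas):
    step = batches[i:i + num_replicas]
    step += [[]] * (num_replicas - len(step))  # pad replicas without data
    steps.append(step)
  return steps

# Matches expected_values in the single-worker testUnevenDatasetBatches:
print(split_batches(list(range(9)), batch_size=2, num_replicas=2))
# [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]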
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================== """Contains utility functions used by summary ops in distribution strategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.distribute import distribution_strategy_context from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util def skip_summary(): """Determines if summary should be skipped. If using multiple replicas in distributed strategy, skip summaries on all replicas except the first one (replica_id=0). Returns: True if the summary is skipped; False otherwise. """ # TODO(priyag): Add a new optional argument that will provide multiple # alternatives to override default behavior. (e.g. run on last replica, # compute sum or mean across replicas). replica_context = distribution_strategy_context.get_replica_context() if not replica_context: return False # TODO(b/118385803): when replica_id of _TPUReplicaContext is properly # initialized, remember to change here as well. replica_id = replica_context.replica_id_in_sync_group if isinstance(replica_id, ops.Tensor): replica_id = tensor_util.constant_value(replica_id) return replica_id and replica_id > 0
tensorflow-master
tensorflow/python/distribute/summary_op_util.py
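# A hypothetical usage sketch for skip_summary() above (maybe_write_summary
# is an invented helper, not a TensorFlow API): a summary op can consult
# skip_summary() so that the write happens on replica 0 only and every other
# replica gets a no-op instead.
from tensorflow.python.distribute import summary_op_util
from tensorflow.python.ops import control_flow_ops

def maybe_write_summary(write_summary_fn):
  """Runs `write_summary_fn` on replica 0 only; returns a no-op elsewhere."""
  if summary_op_util.skip_summary():
    return control_flow_ops.no_op()
  return write_summary_fn()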
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for all_reduce.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from tensorflow.core.framework import types_pb2 from tensorflow.python.distribute import all_reduce as ar from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging class AllReduceTest(test_util.TensorFlowTestCase): @test_util.run_deprecated_v1 def testFlattenTensorsShapesDefined(self): x = array_ops.placeholder(types_pb2.DT_FLOAT, [None]) with self.assertRaisesRegexp(ValueError, "must have statically known shape"): ar._flatten_tensors([x, x]) def testRingPermutations(self): # 0 devices pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, []) self.assertEqual(pred_by_c_d, []) self.assertEqual(rank_by_c_d, []) # 1 worker, 1 subchunk cases pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0]) self.assertEqual(pred_by_c_d, [[0]]) self.assertEqual(rank_by_c_d, [[0]]) pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2]) self.assertEqual(pred_by_c_d, [[2, 0, 1]]) self.assertEqual(rank_by_c_d, [[0, 1, 2]]) # multiple workers, 1 subchunk cases pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2]) self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]]) self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]]) pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2]) self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]]) self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]]) pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0]) self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]]) self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]]) # 1 worker, multiple subchunk cases pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3]) self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]]) self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]]) pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3]) self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2], [3, 0, 1, 2], [3, 0, 1, 2]]) self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2], [2, 3, 0, 1], [1, 2, 3, 0]]) # multiple worker, multiple subchunk cases pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3]) self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6], [3, 0, 5, 2, 7, 4, 1, 6]]) self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7], [2, 3, 0, 1, 6, 7, 4, 5]]) pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1]) 
self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4], [1, 2, 7, 0, 5, 6, 3, 4]]) self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5], [2, 1, 0, 3, 6, 5, 4, 7]]) def _buildInput(self, num_workers, num_gpus): t8 = constant_op.constant( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], types_pb2.DT_FLOAT) input_tensors = [] device_names = [] for w in range(0, num_workers): for d in range(0, num_gpus): dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus) device_names.append(dn) with ops.device(dn): input_tensors.append(array_ops.identity(t8)) return input_tensors, device_names @test_util.run_deprecated_v1 def testBuildRingGatherPassStructure(self): # 1 worker, 1 device input_tensors, device_names = self._buildInput(1, 1) pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0]) output_tensors = ar._build_ring_gather(input_tensors, device_names, 1, pred_by_c_d, rank_by_c_d, math_ops.add) self.assertEqual(output_tensors, input_tensors) # 1 worker, 4 devices, 2 subchunks input_tensors, device_names = self._buildInput(1, 4) pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3]) output_tensors, pad_len = ar._build_ring_gather( input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add) self.assertEqual(0, pad_len) # same number outputs as inputs self.assertEqual(len(output_tensors), len(input_tensors)) num_chunks = 2 * len(input_tensors) tlen = tensor_shape.dimension_value(input_tensors[0].shape[0]) for otl in output_tensors: self.assertEqual(len(otl), num_chunks) for ot in otl: self.assertEqual(ot.shape, [tlen/num_chunks]) def _buildInitialVars(self, shape, dev_list): values = [] num_devices = len(dev_list) dim = np.prod(shape) if shape else 1 for d in range(0, num_devices): with ops.device(dev_list[d]): npt = np.zeros(shape).astype(np.float32) alias = np.frombuffer(npt.data, dtype=np.float32) for i in range(0, dim): alias[i] = i + 0.01 * d var = state_ops.variable_op(shape, types_pb2.DT_FLOAT) state_ops.init_variable(var, npt).op.run() values.append(var) return values # pylint: disable=g-long-lambda def _buildRing(self, num_workers, num_gpus, subdiv): gpu_perm = range(0, num_gpus) return lambda x, un_op: ar.build_ring_all_reduce( x, num_workers, subdiv, gpu_perm, math_ops.add, un_op) def _testAllReduce(self, num_workers, num_gpus, shape, build_f): # Use local CPU as device for all inputs. 
num_devices = num_workers * num_gpus dev_list = ["/replica:0/task:0/device:CPU:0" for _ in range(num_devices)] with self.cached_session(): input_tensors = self._buildInitialVars(shape, dev_list) un_op = lambda x: math_ops.div( x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT)) simple_sum = math_ops.add_n(input_tensors) simple_sum.op.run() output_tensors = build_f(input_tensors, un_op) sum_reduced = math_ops.add_n(output_tensors) sum_reduced.op.run() self.assertAllClose(sum_reduced.eval(), self.evaluate(simple_sum)) def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv): start_time = time.time() build_f = self._buildRing(num_workers, num_gpus, subdiv) self._testAllReduce(num_workers, num_gpus, shape, build_f) elapsed = time.time() - start_time tf_logging.info("RingAllReduce num_workers=%d num_gpus=%d shape=%s " "subdiv=%d elapsed=%f" % (num_workers, num_gpus, shape, subdiv, elapsed)) @test_util.run_deprecated_v1 def testRingAllReduce(self): self._testRingAllReduce(1, 2, [], 1) self._testRingAllReduce(1, 2, [8], 1) self._testRingAllReduce(1, 2, [4, 4], 1) self._testRingAllReduce(6, 1, [8], 1) self._testRingAllReduce(1, 8, [32], 1) self._testRingAllReduce(1, 8, [120], 1) self._testRingAllReduce(2, 8, [7, 13], 1) self._testRingAllReduce(2, 8, [8, 8], 2) self._testRingAllReduce(2, 8, [8, 8], 4) # TODO(tucker): The following test is surprisingly slow. # Diagnose and fix before re-enabling. # self._testRingAllReduce(4, 8, [8, 8, 2], 4) def _buildShuffle(self, num_workers, num_gpus, num_shards): # Use local CPU for all shuffle shards gather_devices = ["/replica:0/task:0/device:CPU:0" for _ in range(num_shards)] return lambda x, un_op: ar.build_shuffle_all_reduce( x, gather_devices, math_ops.add_n, un_op) def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards): start_time = time.time() build_f = self._buildShuffle(num_workers, num_gpus, num_shards) self._testAllReduce(num_workers, num_gpus, shape, build_f) elapsed = time.time() - start_time tf_logging.info("ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s " "elapsed=%f" % (num_workers, num_gpus, shape, elapsed)) @test_util.run_deprecated_v1 def testShuffleAllReduce(self): self._testShuffleAllReduce(1, 2, [], 1) self._testShuffleAllReduce(1, 2, [8], 1) self._testShuffleAllReduce(1, 2, [4, 4], 1) self._testShuffleAllReduce(1, 8, [32], 1) self._testShuffleAllReduce(1, 8, [120], 1) self._testShuffleAllReduce(2, 8, [7, 13], 3) self._testShuffleAllReduce(2, 8, [8, 8], 2) self._testShuffleAllReduce(2, 8, [8, 8], 4) self._testShuffleAllReduce(4, 8, [8, 8, 2], 4) def _buildRecursiveHD(self, num_workers, num_gpus): return lambda x, un_op: ar.build_recursive_hd_all_reduce( x, math_ops.add, un_op) # pylint: enable=g-long-lambda def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape): start_time = time.time() build_f = self._buildRecursiveHD(num_workers, num_gpus) self._testAllReduce(num_workers, num_gpus, shape, build_f) elapsed = time.time() - start_time tf_logging.info("RecursiveHDAllReduce num_workers=%d num_gpus=%d " "shape=%s elapsed=%f" % (num_workers, num_gpus, shape, elapsed)) @test_util.run_deprecated_v1 def testRecursiveHDAllReduce(self): self._testRecursiveHDAllReduce(1, 2, [8]) self._testRecursiveHDAllReduce(1, 2, [4, 4]) self._testRecursiveHDAllReduce(1, 8, [32]) self._testRecursiveHDAllReduce(1, 8, [120]) self._testRecursiveHDAllReduce(2, 8, [8, 8]) self._testRecursiveHDAllReduce(4, 8, [8, 8, 2]) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/all_reduce_test.py
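# A NumPy reference (illustrative, mirroring what _testAllReduce above
# checks) for the contract every all-reduce variant under test must satisfy:
# each device ends up with the elementwise sum of all inputs, optionally
# passed through a final un_op (here a division that turns the sum into a
# mean).
import numpy as np

def reference_all_reduce(input_tensors, un_op=None):
  reduced = np.sum(input_tensors, axis=0)
  if un_op is not None:
    reduced = un_op(reduced)
  return [reduced.copy() for _ in input_tensors]  # one result per device

# Inputs shaped like _buildInitialVars: value i + 0.01 * device_index.
inputs = [np.arange(4.) + 0.01 * d for d in range(4)]
outputs = reference_all_reduce(inputs, un_op=lambda x: x / len(inputs))
print(outputs[0])  # the mean across the four simulated devices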
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for a little bit of strategy_combinations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.distribute import combinations from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import strategy_combinations from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.platform import test class StrategyCombinationsTest(test.TestCase, parameterized.TestCase): def setUp(self): # Need to call set_virtual_cpus_to_at_least() in setUp with the maximum # value needed in any test. strategy_combinations.set_virtual_cpus_to_at_least(3) super(StrategyCombinationsTest, self).setUp() def test3VirtualCPUs(self): cpu_device = config.list_physical_devices("CPU")[0] self.assertLen(config.get_virtual_device_configuration(cpu_device), 3) def testSetVirtualCPUsAgain(self): strategy_combinations.set_virtual_cpus_to_at_least(2) cpu_device = config.list_physical_devices("CPU")[0] self.assertLen(config.get_virtual_device_configuration(cpu_device), 3) def testSetVirtualCPUsErrors(self): with self.assertRaises(ValueError): strategy_combinations.set_virtual_cpus_to_at_least(0) with self.assertRaisesRegexp(RuntimeError, "with 3 < 5 virtual CPUs"): strategy_combinations.set_virtual_cpus_to_at_least(5) @combinations.generate(combinations.combine( distribution=[strategy_combinations.mirrored_strategy_with_cpu_1_and_2], mode=["graph", "eager"])) def testMirrored2CPUs(self, distribution): with distribution.scope(): one_per_replica = distribution.experimental_run_v2( lambda: constant_op.constant(1)) num_replicas = distribution.reduce( reduce_util.ReduceOp.SUM, one_per_replica, axis=None) self.assertEqual(2, self.evaluate(num_replicas)) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/strategy_combinations_test.py
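# A sketch of the virtual-CPU setup that set_virtual_cpus_to_at_least(3)
# performs, assuming the experimental virtual-device API of this era: the
# single physical CPU is split into three logical devices so CPU-only
# multi-replica strategies (e.g. mirrored_strategy_with_cpu_1_and_2) can run
# inside one process.
from tensorflow.python.eager import context
from tensorflow.python.framework import config

cpu = config.list_physical_devices("CPU")[0]
config.set_virtual_device_configuration(
    cpu, [context.VirtualDeviceConfiguration() for _ in range(3)])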
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes implementing a multi-worker ps DistributionStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import device_util from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import input_lib from tensorflow.python.distribute import mirrored_strategy from tensorflow.python.distribute import multi_worker_util from tensorflow.python.distribute import numpy_dataset from tensorflow.python.distribute import values from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver from tensorflow.python.eager import context from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import device_setter from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export _LOCAL_CPU = "/device:CPU:0" # TODO(yuefengz): maybe cache variables on local CPU. @tf_export("distribute.experimental.ParameterServerStrategy", v1=[]) class ParameterServerStrategy(distribute_lib.Strategy): """An asynchronous multi-worker parameter server DistributionStrategy. This strategy requires two jobs: workers and parameter servers. Variables and updates to those variables will be assigned to parameter servers and other operations are assigned to workers. When each worker has more than one GPU, operations will be replicated on these GPUs. Even though operations may be replicated, variables are not and each worker shares a common view for which parameter server a variable is assigned to. By default it uses `TFConfigClusterResolver` to detect configurations for multi-worker training. This requires a 'TF_CONFIG' environment variable and the 'TF_CONFIG' must have a cluster spec. This class assumes each worker is running the same code independently, but parameter servers are running a standard server. This means that while each worker will synchronously compute a single gradient update across all GPUs, updates between workers proceed asynchronously. Operations that occur only on the first replica (such as incrementing the global step), will occur on the first replica *of every worker*. It is expected to call `call_for_each_replica(fn, ...)` for any operations which potentially can be replicated across replicas (i.e. multiple GPUs) even if there is only CPU or one GPU. 
When defining the `fn`, extra caution needs to be taken: 1) It is generally not recommended to open a device scope under the strategy's scope. A device scope (i.e. calling `tf.device`) will be merged with or override the device for operations but will not change the device for variables. 2) It is also not recommended to open a colocation scope (i.e. calling `tf.compat.v1.colocate_with`) under the strategy's scope. For colocating variables, use `strategy.extended.colocate_vars_with` instead. Colocation of ops will possibly create conflicts of device assignment. """ def __init__(self, cluster_resolver=None): """Initializes this strategy. Args: cluster_resolver: Optional `tf.distribute.cluster_resolver.ClusterResolver` object. Defaults to a `tf.distribute.cluster_resolver.TFConfigClusterResolver`. """ if cluster_resolver is None: cluster_resolver = TFConfigClusterResolver() if not cluster_resolver.cluster_spec(): raise ValueError("Cluster spec must be non-empty in `cluster_resolver`.") extended = ParameterServerStrategyExtended( self, cluster_resolver=cluster_resolver) super(ParameterServerStrategy, self).__init__(extended) @tf_export(v1=["distribute.experimental.ParameterServerStrategy"]) class ParameterServerStrategyV1(distribute_lib.StrategyV1): __doc__ = ParameterServerStrategy.__doc__ def __init__(self, cluster_resolver=None): """Initializes this strategy.""" super(ParameterServerStrategyV1, self).__init__( ParameterServerStrategyExtended( self, cluster_resolver=cluster_resolver)) # TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1. class ParameterServerStrategyExtended(distribute_lib.StrategyExtendedV1): """Implementation of ParameterServerStrategy and CentralStorageStrategy.""" def __init__(self, container_strategy, cluster_resolver=None, compute_devices=None, parameter_device=None): super(ParameterServerStrategyExtended, self).__init__(container_strategy) self._initialize_strategy( cluster_resolver=cluster_resolver, compute_devices=compute_devices, parameter_device=parameter_device) # We typically don't need to do all-reduce in this strategy. self._cross_device_ops = ( cross_device_ops_lib.ReductionToOneDevice(reduce_to_device=_LOCAL_CPU)) def _initialize_strategy(self, cluster_resolver=None, compute_devices=None, parameter_device=None): if cluster_resolver and cluster_resolver.cluster_spec(): self._initialize_multi_worker(cluster_resolver) else: self._initialize_local( compute_devices, parameter_device, cluster_resolver=cluster_resolver) def _initialize_multi_worker(self, cluster_resolver): """Initialize devices for multiple workers. It creates variable devices and compute devices. Variables and operations will be assigned to them respectively. We have one compute device per replica. The variable device is a device function or device string. The default variable device assigns variables to parameter servers in a round-robin fashion. Args: cluster_resolver: a descendant of `ClusterResolver` object. Raises: ValueError: if the cluster doesn't have ps jobs. """ # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in # some cases. if isinstance(cluster_resolver, TFConfigClusterResolver): num_gpus = context.num_gpus() else: num_gpus = cluster_resolver.num_accelerators().get("GPU", 0) # Save the num_gpus_per_worker for configure method. 
self._num_gpus_per_worker = num_gpus cluster_spec = cluster_resolver.cluster_spec() task_type = cluster_resolver.task_type task_id = cluster_resolver.task_id if not task_type or task_id is None: raise ValueError("When `cluster_spec` is given, you must also specify " "`task_type` and `task_id`") cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec) assert cluster_spec.as_dict() worker_device = "/job:%s/task:%d" % (task_type, task_id) self._input_host_device = numpy_dataset.SingleDevice(worker_device) # Define compute devices which is a list of device strings and one for each # replica. When there are GPUs, replicate operations on these GPUs. # Otherwise, place operations on CPU. if num_gpus > 0: compute_devices = tuple( "%s/device:GPU:%d" % (worker_device, i) for i in range(num_gpus)) else: compute_devices = (worker_device,) self._device_map = values.ReplicaDeviceMap(compute_devices) self._input_workers = input_lib.InputWorkers( self._device_map, [(worker_device, compute_devices)]) # In distributed mode, place variables on ps jobs in a round-robin fashion. # Note that devices returned from `replica_device_setter` are not # canonical and therefore we don't canonicalize all variable devices to # make them consistent. # TODO(yuefengz): support passing a strategy object to control variable # assignment. # TODO(yuefengz): merge the logic of replica_device_setter into this # class. num_ps_replicas = len(cluster_spec.as_dict().get("ps", [])) if num_ps_replicas == 0: raise ValueError("The cluster spec needs to have `ps` jobs.") self._variable_device = device_setter.replica_device_setter( ps_tasks=num_ps_replicas, worker_device=worker_device, merge_devices=True, cluster=cluster_spec) # The `_parameter_devices` is needed for the `parameter_devices` property # and is a list of all variable devices. Here parameter devices are all # tasks of the "ps" job. self._parameter_devices = tuple(map("/job:ps/task:{}".format, range(num_ps_replicas))) # Add a default device so that ops without specified devices will not end up # on other workers. self._default_device = worker_device self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type, task_id) self._cluster_spec = cluster_spec self._task_type = task_type self._task_id = task_id logging.info( "Multi-worker ParameterServerStrategy with " "cluster_spec = %r, task_type = %r, task_id = %r, " "num_ps_replicas = %r, is_chief = %r, device_map = %r, " "variable_device = %r", cluster_spec.as_dict(), task_type, task_id, num_ps_replicas, self._is_chief, self._device_map, self._variable_device) # TODO(yuefengz): get rid of cluster_resolver argument when contrib's # version no longer depends on this class. def _initialize_local(self, compute_devices, parameter_device, cluster_resolver=None): """Initialize internal devices for local training.""" worker_device = device_util.canonicalize("/device:CPU:0") self._input_host_device = numpy_dataset.SingleDevice(worker_device) if compute_devices is None: if not cluster_resolver: num_gpus = context.num_gpus() else: num_gpus = cluster_resolver.num_accelerators().get("GPU", 0) # Save the num_gpus_per_worker for configure method which is used by the # contrib version. self._num_gpus_per_worker = num_gpus compute_devices = device_util.local_devices_from_num_gpus(num_gpus) if parameter_device is None: # If there is only one GPU, put everything on that GPU. Otherwise, place # variables on CPU. 
if len(compute_devices) == 1: parameter_device = compute_devices[0] else: parameter_device = _LOCAL_CPU self._device_map = values.ReplicaDeviceMap(compute_devices) self._input_workers = input_lib.InputWorkers( self._device_map, [(worker_device, compute_devices)]) self._variable_device = parameter_device self._parameter_devices = (parameter_device,) self._is_chief = True self._cluster_spec = None self._task_type = None self._task_id = None logging.info( "ParameterServerStrategy with compute_devices = %r, " "variable_device = %r", compute_devices, self._variable_device) def _validate_colocate_with_variable(self, colocate_with_variable): values.validate_colocate(colocate_with_variable, self) def _experimental_distribute_dataset(self, dataset): return input_lib.get_distributed_dataset( dataset, self._input_workers, self._container_strategy(), split_batch_by=self._num_replicas_in_sync) def _make_dataset_iterator(self, dataset): return input_lib.DatasetIterator( dataset, self._input_workers, self._container_strategy(), split_batch_by=self._num_replicas_in_sync) def _make_input_fn_iterator( self, input_fn, replication_mode=distribute_lib.InputReplicationMode.PER_WORKER): """Distributes the dataset to each local GPU.""" if self._cluster_spec: input_pipeline_id = multi_worker_util.id_in_cluster( self._cluster_spec, self._task_type, self._task_id) num_input_pipelines = multi_worker_util.worker_count( self._cluster_spec, self._task_type) else: input_pipeline_id = 0 num_input_pipelines = 1 input_context = distribute_lib.InputContext( num_input_pipelines=num_input_pipelines, input_pipeline_id=input_pipeline_id, num_replicas_in_sync=self._num_replicas_in_sync) return input_lib.InputFunctionIterator(input_fn, self._input_workers, [input_context], self._container_strategy()) def _experimental_make_numpy_dataset(self, numpy_input, session): return numpy_dataset.one_host_numpy_dataset( numpy_input, self._input_host_device, session) def _experimental_distribute_datasets_from_function(self, dataset_fn): if self._cluster_spec: input_pipeline_id = multi_worker_util.id_in_cluster( self._cluster_spec, self._task_type, self._task_id) num_input_pipelines = multi_worker_util.worker_count( self._cluster_spec, self._task_type) else: input_pipeline_id = 0 num_input_pipelines = 1 input_context = distribute_lib.InputContext( num_input_pipelines=num_input_pipelines, input_pipeline_id=input_pipeline_id, num_replicas_in_sync=self._num_replicas_in_sync) return input_lib.DistributedDatasetsFromFunction( dataset_fn, self._input_workers, [input_context], self._container_strategy()) def _broadcast_to(self, tensor, destinations): # This is both a fast path for Python constants, and a way to delay # converting Python values to a tensor until we know what type it # should be converted to. Otherwise we have trouble with: # global_step.assign_add(1) # since the `1` gets broadcast as an int32 but global_step is int64. if isinstance(tensor, (float, int)): return tensor if not cross_device_ops_lib.check_destinations(destinations): # TODO(josh11b): Use current logical device instead of 0 here. destinations = values.LogicalDeviceSpec( device_map=self._device_map, logical_device=0) return self._cross_device_ops.broadcast(tensor, destinations) def _allow_variable_partition(self): return not context.executing_eagerly() # TODO(yuefengz): not all ops in device_setter.STANDARD_PS_OPS will go through # this creator, such as "MutableHashTable". 
def _create_variable(self, next_creator, *args, **kwargs): if self._num_replicas_in_sync > 1: aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE) if aggregation not in ( vs.VariableAggregation.NONE, vs.VariableAggregation.SUM, vs.VariableAggregation.MEAN, vs.VariableAggregation.ONLY_FIRST_REPLICA ): raise ValueError("Invalid variable aggregation mode: " + aggregation + " for variable: " + kwargs["name"]) def var_creator(*args, **kwargs): """Create an AggregatingVariable and fix up collections.""" # Record what collections this variable should be added to. collections = kwargs.pop("collections", None) if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] kwargs["collections"] = [] # Create and wrap the variable. v = next_creator(*args, **kwargs) wrapped = values.AggregatingVariable( self._container_strategy(), v, aggregation) # Add the wrapped variable to the requested collections. # The handling of eager mode and the global step matches # ResourceVariable._init_from_args(). if not context.executing_eagerly(): g = ops.get_default_graph() # If "trainable" is True, next_creator() will add the contained # variable to the TRAINABLE_VARIABLES collection, so we manually # remove it and replace with the wrapper. We can't set "trainable" # to False for next_creator() since that causes functions like # implicit_gradients to skip those variables. if kwargs.get("trainable", True): collections.append(ops.GraphKeys.TRAINABLE_VARIABLES) l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES) if v in l: l.remove(v) g.add_to_collections(collections, wrapped) elif ops.GraphKeys.GLOBAL_STEP in collections: ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, wrapped) return wrapped else: var_creator = next_creator if "colocate_with" in kwargs: colocate_with = kwargs["colocate_with"] if isinstance(colocate_with, numpy_dataset.SingleDevice): with ops.device(colocate_with.device): return var_creator(*args, **kwargs) with ops.device(None): with ops.colocate_with(colocate_with): return var_creator(*args, **kwargs) with ops.colocate_with(None, ignore_existing=True): with ops.device(self._variable_device): return var_creator(*args, **kwargs) def _call_for_each_replica(self, fn, args, kwargs): # pylint: disable=protected-access return mirrored_strategy._call_for_each_replica( self._container_strategy(), self._device_map, fn, args, kwargs) def _verify_destinations_not_different_worker(self, destinations): if not self._cluster_spec: return if destinations is None: return for d in cross_device_ops_lib.get_devices_from(destinations): d_spec = tf_device.DeviceSpec.from_string(d) if d_spec.job == self._task_type and d_spec.task != self._task_id: raise ValueError( "Cannot reduce to another worker: %r, current worker is %r" % (d, self._input_workers.worker_devices[0])) def _reduce_to(self, reduce_op, value, destinations): self._verify_destinations_not_different_worker(destinations) if not isinstance(value, values.DistributedValues): # pylint: disable=protected-access return cross_device_ops_lib.reduce_non_distributed_value( reduce_op, self._device_map, value, destinations) return self._cross_device_ops.reduce( reduce_op, value, destinations=destinations) def _batch_reduce_to(self, reduce_op, value_destination_pairs): for _, destinations in value_destination_pairs: self._verify_destinations_not_different_worker(destinations) return self._cross_device_ops.batch_reduce(reduce_op, value_destination_pairs) def _select_single_value(self, structured): """Select any single values in 
`structured`.""" def _select_fn(x): # pylint: disable=g-missing-docstring if isinstance(x, values.Mirrored): if len(x.devices) == 1: return x.primary else: raise ValueError( "You cannot update variable with a Mirrored object with multiple " "components %r when using ParameterServerStrategy. You must " "specify a single value or a Mirrored with a single value." % x) elif isinstance(x, values.PerReplica): raise ValueError( "You cannot update variable with a PerReplica object %r when using " "ParameterServerStrategy. You must specify a single value or a " "Mirrored with a single value" % x) else: return x return nest.map_structure(_select_fn, structured) def _update(self, var, fn, args, kwargs, group): if isinstance(var, values.AggregatingVariable): var = var.get() if not isinstance(var, resource_variable_ops.BaseResourceVariable): raise ValueError( "You can not update `var` %r. It must be a Variable." % var) with ops.colocate_with(var), distribute_lib.UpdateContext(var.device): result = fn(var, *self._select_single_value(args), **self._select_single_value(kwargs)) if group: return result else: return nest.map_structure(self._local_results, result) # TODO(yuefengz): does it need to call _select_single_value? def _update_non_slot(self, colocate_with, fn, args, kwargs, group): with ops.device( colocate_with.device), distribute_lib.UpdateContext(colocate_with): result = fn(*args, **kwargs) if group: return result else: return nest.map_structure(self._local_results, result) def _local_results(self, val): if isinstance(val, values.DistributedValues): return val.values return (val,) def value_container(self, val): if (hasattr(val, "_aggregating_container") and not isinstance(val, values.AggregatingVariable)): wrapper = val._aggregating_container() # pylint: disable=protected-access if wrapper is not None: return wrapper return val def read_var(self, var): # No need to distinguish between normal variables and replica-local # variables. return array_ops.identity(var) def _configure(self, session_config=None, cluster_spec=None, task_type=None, task_id=None): """Configures the strategy class. The strategy object will be re-initialized if `cluster_spec` is given but was not passed in the constructor. Args: session_config: not used currently. cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. task_type: the current task type. task_id: the current task id. Raises: ValueError: if `cluster_spec` is given but `task_type` or `task_id` is not. """ if cluster_spec: # Use the num_gpus_per_worker recorded in constructor since _configure # doesn't take num_gpus. cluster_resolver = SimpleClusterResolver( cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec), task_type=task_type, task_id=task_id, num_accelerators={"GPU": self._num_gpus_per_worker}) self._initialize_multi_worker(cluster_resolver) if session_config: session_config.CopyFrom(self._update_config_proto(session_config)) def _update_config_proto(self, config_proto): updated_config = copy.deepcopy(config_proto) if not self._cluster_spec: updated_config.isolate_session_state = True return updated_config updated_config.isolate_session_state = False assert self._task_type assert self._task_id is not None # The device filters prevent communication between workers. 
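    # For example (illustrative values): a worker with task_type "worker" and
    # task_id 3 ends up with device_filters ["/job:worker/task:3", "/job:ps"],
    # so its session only sees its own devices and those of the parameter
    # servers, never the devices of other workers.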
del updated_config.device_filters[:] if self._task_type in ["chief", "worker"]: updated_config.device_filters.extend( ["/job:%s/task:%d" % (self._task_type, self._task_id), "/job:ps"]) elif self._task_type == "evaluator": updated_config.device_filters.append( "/job:%s/task:%d" % (self._task_type, self._task_id)) return updated_config @property def _num_replicas_in_sync(self): return self._device_map.num_replicas_in_graph @property def worker_devices(self): return self._device_map.all_devices @property def worker_devices_by_replica(self): return self._device_map.devices_by_replica @property def parameter_devices(self): return self._parameter_devices def non_slot_devices(self, var_list): return min(var_list, key=lambda x: x.name) @property def experimental_between_graph(self): # TODO(yuefengz): Should this return False in the local case? return True @property def experimental_should_init(self): return self._is_chief @property def should_checkpoint(self): return self._is_chief @property def should_save_summary(self): return self._is_chief # TODO(priyag): Delete this once all strategies use global batch size. @property def _global_batch_size(self): """`make_dataset_iterator` and `make_numpy_iterator` use global batch size. `make_input_fn_iterator` assumes per-replica batching. Returns: Boolean. """ return True
tensorflow-master
tensorflow/python/distribute/parameter_server_strategy.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to get tf.distribute.Strategy related contexts."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import ops
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export


# There is a circular dependency between this and the `distribute` module. So
# we load it lazily to work around this.
distribute_lib = LazyLoader(
    "distribute_lib", globals(),
    "tensorflow.python.distribute.distribute_lib")

# ------------------------------------------------------------------------------
# Internal API for setting the current thread mode as being either in a
# replica or cross-replica context for a particular tf.distribute.Strategy.


class _ThreadMode(object):

  def __init__(self, dist, cross, replica):
    self.strategy = dist
    self.cross_replica_context = cross
    self.replica_context = replica


class _CrossReplicaThreadMode(_ThreadMode):

  def __init__(self, strategy):
    _ThreadMode.__init__(self, strategy, strategy, None)


class _InReplicaThreadMode(_ThreadMode):

  def __init__(self, replica_ctx):
    _ThreadMode.__init__(self, replica_ctx.strategy, None, replica_ctx)


def _push_per_thread_mode(context):
  ops.get_default_graph()._distribution_strategy_stack.append(context)  # pylint: disable=protected-access


def _pop_per_thread_mode():
  ops.get_default_graph()._distribution_strategy_stack.pop(-1)  # pylint: disable=protected-access


class _DefaultReplicaThreadMode(_ThreadMode):
  """Type of default value returned by `_get_per_thread_mode()`.

  Used when the thread-local stack is empty.
  """

  def __init__(self):
    _ThreadMode.__init__(self, _get_default_strategy(), None,
                         _get_default_replica_context())


def _get_per_thread_mode():
  try:
    return ops.get_default_graph()._distribution_strategy_stack[-1]  # pylint: disable=protected-access
  except (AttributeError, IndexError):
    return _get_default_replica_mode()


# ------------------------------------------------------------------------------
# Public API for accessing the current thread mode


@tf_export("distribute.get_replica_context")
def get_replica_context():
  """Returns the current `tf.distribute.ReplicaContext` or `None`.

  Returns `None` if in a cross-replica context.

  Note that execution:

  1. starts in the default (single-replica) replica context (this function
     will return the default `ReplicaContext` object);
  2. switches to cross-replica context (in which case this will return
     `None`) when entering a `with tf.distribute.Strategy.scope():` block;
  3. switches to a (non-default) replica context inside
     `extended.call_for_each_replica(fn, ...)`;
  4. if `fn` calls `get_replica_context().merge_call(merge_fn, ...)`, then
     inside `merge_fn` you are back in the cross-replica context (and again
     this function will return `None`).
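  For example (a sketch of steps 1 and 2; `MirroredStrategy` stands in for
  any concrete strategy):

  ```
  assert tf.distribute.get_replica_context() is not None  # default context
  strategy = tf.distribute.MirroredStrategy()
  with strategy.scope():  # cross-replica context
    assert tf.distribute.get_replica_context() is None
  ```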
Note that you can also go directly from step 1 to 4 to switch to a cross-replica context for the default `tf.distribute.Strategy`. You may also switch from the cross-replica context of 4 to a replica context by calling `extended.call_for_each_replica()`, jumping back to step 3. Most `tf.distribute.Strategy` methods may only be executed in a cross-replica context, in a replica context you should use the `ReplicaContext` API instead. Returns: The current `ReplicaContext` object when in a replica context scope, else `None`. Within a particular block, exactly one of these two things will be true: * `get_replica_context()` returns non-`None`, or * `tf.distribute.is_cross_replica_context()` returns True. """ return _get_per_thread_mode().replica_context def get_cross_replica_context(): """Returns the current tf.distribute.Strategy if in a cross-replica context. DEPRECATED: Please use `in_cross_replica_context()` and `get_strategy()` instead. Note that execution: 1. starts in the default (single-replica) replica context; 2. switches to cross-replica context when entering a `with tf.distribute.Strategy.scope():` block; 3. switches to a (non-default) replica context inside `call_for_each_replica(fn, ...)`; 4. if `fn` calls `get_replica_context()->merge_call(merge_fn, ...)`, then inside `merge_fn` you are back in the cross-replica context. Note that you can also go directly from step 1 to 4 to switch to a cross-replica context for the default `tf.distribute.Strategy`. You may also switch from the cross-replica context of 4 to a replica context by calling `call_for_each_replica()`, jumping back to step 3. Most `tf.distribute.Strategy` methods may only be executed in a cross-replica context. Returns: Returns the current `tf.distribute.Strategy` object in a cross-replica context, or `None`. Exactly one of `get_replica_context()` and `get_cross_replica_context()` will return `None` in a particular block. """ return _get_per_thread_mode().cross_replica_context @tf_export("distribute.in_cross_replica_context") def in_cross_replica_context(): """Returns True if in a cross-replica context. See `tf.distribute.get_replica_context` for details. Returns: True if in a cross-replica context (`get_replica_context()` returns `None`), or False if in a replica context (`get_replica_context()` returns non-`None`). """ return _get_per_thread_mode().cross_replica_context is not None @tf_export("distribute.get_strategy") def get_strategy(): """Returns the current `tf.distribute.Strategy` object. Typically only used in a cross-replica context: ``` if tf.distribute.in_cross_replica_context(): strategy = tf.distribute.get_strategy() ... ``` Returns: A `tf.distribute.Strategy` object. Inside a `with strategy.scope()` block, it returns `strategy`, otherwise it returns the default (single-replica) `tf.distribute.Strategy` object. """ return _get_per_thread_mode().strategy @tf_export("distribute.has_strategy") def has_strategy(): """Return if there is a current non-default `tf.distribute.Strategy`. Returns: True if inside a `with strategy.scope():`. """ return get_strategy() is not _get_default_strategy() def get_strategy_and_replica_context(): per_thread_mode = _get_per_thread_mode() return (per_thread_mode.strategy, per_thread_mode.replica_context) @tf_export("distribute.experimental_set_strategy") def experimental_set_strategy(strategy): """Set a `tf.distribute.Strategy` as current without `with strategy.scope()`. 
``` tf.distribute.experimental_set_strategy(strategy1) f() tf.distribute.experimental_set_strategy(strategy2) g() tf.distribute.experimental_set_strategy(None) h() ``` is equivalent to: ``` with strategy1.scope(): f() with strategy2.scope(): g() h() ``` In general, you should use the `with strategy.scope():` API, but this alternative may be convenient in notebooks where you would have to put each cell in a `with strategy.scope():` block. Note: This should only be called outside of any TensorFlow scope to avoid improper nesting. Args: strategy: A `tf.distribute.Strategy` object or None. Raises: RuntimeError: If called inside a `with strategy.scope():`. """ old_scope = ops.get_default_graph()._global_distribute_strategy_scope # pylint: disable=protected-access if old_scope is not None: old_scope.__exit__(None, None, None) ops.get_default_graph()._global_distribute_strategy_scope = None # pylint: disable=protected-access if has_strategy(): raise RuntimeError( "Must not be called inside a `tf.distribute.Strategy` scope.") if strategy is not None: new_scope = strategy.scope() new_scope.__enter__() ops.get_default_graph()._global_distribute_strategy_scope = new_scope # pylint: disable=protected-access # ------------------------------------------------------------------------------ # Defaults that are used when no tf.distribute.Strategy is explicitly created. # We create them lazily in a function so that we can workaround the circular # dependency on distribute_lib. See lazy loader at the top of this file. _defaults = { "strategy": None, "replica_context": None, "replica_mode": None } def _get_default_strategy(): if _defaults["strategy"] is None: # pylint: disable=protected-access # Make sure distribute_lib module is loaded by accessing some member. _ = distribute_lib._creating_default_strategy_singleton distribute_lib._creating_default_strategy_singleton = True _defaults["strategy"] = distribute_lib._DefaultDistributionStrategy() distribute_lib._creating_default_strategy_singleton = False # pylint: enable=protected-access return _defaults["strategy"] def _get_default_replica_context(): if _defaults["replica_context"] is None: _defaults["replica_context"] = distribute_lib.ReplicaContext( _get_default_strategy(), replica_id_in_sync_group=0) return _defaults["replica_context"] def _get_default_replica_mode(): if _defaults["replica_mode"] is None: _defaults["replica_mode"] = _DefaultReplicaThreadMode() return _defaults["replica_mode"] # Aliases for compatibility with old names. get_distribution_strategy = get_strategy has_distribution_strategy = has_strategy
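# For illustration, a sketch of what the defaults above imply when no strategy
# scope has been entered (names as exported via tf.distribute):
#
#   tf.distribute.has_strategy()              -> False
#   tf.distribute.get_strategy()              -> the default (single-replica)
#                                                strategy
#   tf.distribute.get_replica_context()       -> the default `ReplicaContext`
#   tf.distribute.in_cross_replica_context()  -> False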
tensorflow-master
tensorflow/python/distribute/distribution_strategy_context.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for V1 metrics.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import combinations from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import tpu_strategy from tensorflow.python.eager import test from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import metrics from tensorflow.python.ops import variables def _labeled_dataset_fn(): # First four batches of x: labels, predictions -> (labels == predictions) # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True return dataset_ops.Dataset.range(1000).map( lambda x: {"labels": x % 5, "predictions": x % 3}).batch( 4, drop_remainder=True) def _boolean_dataset_fn(): # First four batches of labels, predictions: {TP, FP, TN, FN} # with a threshold of 0.5: # T, T -> TP; F, T -> FP; T, F -> FN # F, F -> TN; T, T -> TP; F, T -> FP # T, F -> FN; F, F -> TN; T, T -> TP # F, T -> FP; T, F -> FN; F, F -> TN return dataset_ops.Dataset.from_tensor_slices({ "labels": [True, False, True, False], "predictions": [True, True, False, False]}).repeat().batch( 3, drop_remainder=True) def _threshold_dataset_fn(): # First four batches of labels, predictions: {TP, FP, TN, FN} # with a threshold of 0.5: # True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN return dataset_ops.Dataset.from_tensor_slices({ "labels": [True, False, True, False], "predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch( 3, drop_remainder=True) def _regression_dataset_fn(): return dataset_ops.Dataset.from_tensor_slices({ "labels": [1., .5, 1., 0.], "predictions": [1., .75, .25, 0.]}).repeat() def all_combinations(): return combinations.combine( distribution=[ strategy_combinations.default_strategy, strategy_combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus, ], mode=["graph"]) def tpu_combinations(): return combinations.combine( distribution=[ strategy_combinations.tpu_strategy_one_step, strategy_combinations.tpu_strategy ], mode=["graph"]) # TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k, # metrics.precision_at_k class MetricsV1Test(test.TestCase, parameterized.TestCase): def _test_metric(self, distribution, 
                   dataset_fn, metric_fn, expected_fn):
    with ops.Graph().as_default(), distribution.scope():
      iterator = distribution.make_input_fn_iterator(lambda _: dataset_fn())

      if isinstance(distribution, (tpu_strategy.TPUStrategy,
                                   tpu_strategy.TPUStrategyV1)):
        def step_fn(ctx, inputs):
          value, update = distribution.extended.call_for_each_replica(
              metric_fn, args=(inputs,))
          ctx.set_non_tensor_output(name="value", output=value)
          return distribution.group(update)

        ctx = distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=distribution.extended.steps_per_run)
        update = ctx.run_op
        value = ctx.non_tensor_outputs["value"]
        # In each run, we run multiple steps, and each step consumes as many
        # batches as the number of replicas.
        batches_per_update = (
            distribution.num_replicas_in_sync *
            distribution.extended.steps_per_run)
      else:
        value, update = distribution.extended.call_for_each_replica(
            metric_fn, args=(iterator.get_next(),))
        update = distribution.group(update)
        # TODO(josh11b): Once we switch to using a global batch size for input,
        # replace "distribution.num_replicas_in_sync" with "1".
        batches_per_update = distribution.num_replicas_in_sync

      self.evaluate(iterator.initialize())
      self.evaluate(variables.local_variables_initializer())

      batches_consumed = 0
      for i in range(4):
        self.evaluate(update)
        batches_consumed += batches_per_update
        self.assertAllClose(expected_fn(batches_consumed),
                            self.evaluate(value),
                            0.001,
                            msg="After update #" + str(i+1))
        if batches_consumed >= 4:  # Consume 4 input batches in total.
          break

  @combinations.generate(all_combinations() + tpu_combinations())
  def testMean(self, distribution):
    def _dataset_fn():
      return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(
          4, drop_remainder=True)

    def _expected_fn(num_batches):
      # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
      return num_batches * 2 - 0.5

    self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)

  @combinations.generate(all_combinations() + tpu_combinations())
  def testAccuracy(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.accuracy(labels, predictions)

    def _expected_fn(num_batches):
      return [3./4, 3./8, 3./12, 4./16][num_batches - 1]

    self._test_metric(
        distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)

  # TODO(priyag, jhseu): Enable TPU for this test once scatter_add is added
  # for TPUMirroredVariable.
  @combinations.generate(all_combinations())
  def testMeanPerClassAccuracy(self, distribution):
    def _metric_fn(x):
      labels = x["labels"]
      predictions = x["predictions"]
      return metrics.mean_per_class_accuracy(
          labels, predictions, num_classes=5)

    def _expected_fn(num_batches):
      mean = lambda x: sum(x) / len(x)
      return [mean([1., 1., 1., 0., 0.]),
              mean([0.5, 0.5, 0.5, 0., 0.]),
              mean([1./3, 1./3, 0.5, 0., 0.]),
              mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]

    self._test_metric(
        distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)

  # NOTE(priyag): This metric doesn't work on TPUs yet.
@combinations.generate(all_combinations()) def testMeanIOU(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.mean_iou( labels, predictions, num_classes=5) def _expected_fn(num_batches): mean = lambda x: sum(x) / len(x) return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch mean([1./4, 1./4, 1./3, 0., 0.]), mean([1./6, 1./6, 1./5, 0., 0.]), mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1] self._test_metric( distribution, _labeled_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testMeanTensor(self, distribution): def _dataset_fn(): dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float) # Want to produce a fixed, known shape, so drop remainder when batching. dataset = dataset.batch(4, drop_remainder=True) return dataset def _expected_fn(num_batches): # Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2 # Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1 # Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches # Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1 first = 2. * num_batches - 2. return [first, first + 1., first + 2., first + 3.] self._test_metric( distribution, _dataset_fn, metrics.mean_tensor, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testAUCROC(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.auc(labels, predictions, num_thresholds=8, curve="ROC", summation_method="careful_interpolation") def _expected_fn(num_batches): return [0.5, 7./9, 0.8, 0.75][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testAUCPR(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.auc(labels, predictions, num_thresholds=8, curve="PR", summation_method="careful_interpolation") def _expected_fn(num_batches): return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testFalseNegatives(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.false_negatives(labels, predictions) def _expected_fn(num_batches): return [1., 1., 2., 3.][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testFalseNegativesAtThresholds(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.false_negatives_at_thresholds(labels, predictions, [.5]) def _expected_fn(num_batches): return [[1.], [1.], [2.], [3.]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testTrueNegatives(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.true_negatives(labels, predictions) def _expected_fn(num_batches): return [0., 1., 2., 3.][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testTrueNegativesAtThresholds(self, distribution): def _metric_fn(x): labels = x["labels"] 
predictions = x["predictions"] return metrics.true_negatives_at_thresholds(labels, predictions, [.5]) def _expected_fn(num_batches): return [[0.], [1.], [2.], [3.]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testFalsePositives(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.false_positives(labels, predictions) def _expected_fn(num_batches): return [1., 2., 2., 3.][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testFalsePositivesAtThresholds(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.false_positives_at_thresholds(labels, predictions, [.5]) def _expected_fn(num_batches): return [[1.], [2.], [2.], [3.]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testTruePositives(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.true_positives(labels, predictions) def _expected_fn(num_batches): return [1., 2., 3., 3.][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testTruePositivesAtThresholds(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.true_positives_at_thresholds(labels, predictions, [.5]) def _expected_fn(num_batches): return [[1.], [2.], [3.], [3.]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testPrecision(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.precision(labels, predictions) def _expected_fn(num_batches): return [0.5, 0.5, 0.6, 0.5][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testPrecisionAtThreshold(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.precision_at_thresholds(labels, predictions, [0.5]) def _expected_fn(num_batches): return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testRecall(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.recall(labels, predictions) def _expected_fn(num_batches): return [0.5, 2./3, 0.6, 0.5][num_batches - 1] self._test_metric( distribution, _boolean_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testRecallAtThreshold(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.recall_at_thresholds(labels, predictions, [0.5]) def _expected_fn(num_batches): return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testMeanSquaredError(self, 
distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.mean_squared_error(labels, predictions) def _expected_fn(num_batches): return [0., 1./32, 0.208333, 0.15625][num_batches - 1] self._test_metric( distribution, _regression_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations() + tpu_combinations()) def testRootMeanSquaredError(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.root_mean_squared_error(labels, predictions) def _expected_fn(num_batches): return [0., 0.176777, 0.456435, 0.395285][num_batches - 1] self._test_metric( distribution, _regression_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations()) def testSensitivityAtSpecificity(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.sensitivity_at_specificity(labels, predictions, 0.8) def _expected_fn(num_batches): return [0.5, 2./3, 0.6, 0.5][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) @combinations.generate(all_combinations()) def testSpecificityAtSensitivity(self, distribution): def _metric_fn(x): labels = x["labels"] predictions = x["predictions"] return metrics.specificity_at_sensitivity(labels, predictions, 0.95) def _expected_fn(num_batches): return [0., 1./3, 0.5, 0.5][num_batches - 1] self._test_metric( distribution, _threshold_dataset_fn, _metric_fn, _expected_fn) if __name__ == "__main__": test.main()
tensorflow-master
tensorflow/python/distribute/metrics_v1_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class implementing a single-machine central storage DistributionStrategy."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.util.tf_export import tf_export


@tf_export("distribute.experimental.CentralStorageStrategy", v1=[])
class CentralStorageStrategy(distribute_lib.Strategy):
  """A one-machine strategy that puts all variables on a single device.

  Variables are assigned to the local CPU or to the only GPU. If there is more
  than one GPU, compute operations (other than variable update operations) are
  replicated across all GPUs.

  Args:
    compute_devices: an optional list of device strings on which to replicate
      computation. If this is not provided, all local GPUs are used; if there
      is no GPU, the local CPU is used.
    parameter_device: an optional device string specifying where to place
      variables. Defaults to the CPU, or to the GPU when there is exactly one.
  """

  def __init__(self, compute_devices=None, parameter_device=None):
    extended = parameter_server_strategy.ParameterServerStrategyExtended(
        self,
        compute_devices=compute_devices,
        parameter_device=parameter_device)
    super(CentralStorageStrategy, self).__init__(extended)

  @classmethod
  def _from_num_gpus(cls, num_gpus):
    return cls(device_util.local_devices_from_num_gpus(num_gpus))


@tf_export(v1=["distribute.experimental.CentralStorageStrategy"])
class CentralStorageStrategyV1(distribute_lib.StrategyV1):

  __doc__ = CentralStorageStrategy.__doc__

  def __init__(self, compute_devices=None, parameter_device=None):
    """Initializes this strategy with the given compute/parameter devices."""
    super(CentralStorageStrategyV1, self).__init__(
        parameter_server_strategy.ParameterServerStrategyExtended(
            self,
            compute_devices=compute_devices,
            parameter_device=parameter_device))
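# A minimal usage sketch (illustrative only; the variable created below is a
# placeholder for real model state):
#
#   strategy = tf.distribute.experimental.CentralStorageStrategy()
#   with strategy.scope():
#     v = tf.Variable(1.0)  # lives on the single parameter device
#
# Compute is then replicated across the local GPUs (if any) while `v` stays
# on the parameter device chosen above.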
tensorflow-master
tensorflow/python/distribute/central_storage_strategy.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Device-related support functions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import context
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops


def canonicalize(d, default=None):
  """Canonicalize device string.

  If d has missing components, the rest will be deduced from the `default`
  argument or from '/replica:0/task:0/device:CPU:0'. For example:

    If d = '/cpu:0', default='/job:worker/task:1', it returns
    '/job:worker/replica:0/task:1/device:CPU:0'.

    If d = '/cpu:0', default='/job:worker', it returns
    '/job:worker/replica:0/task:0/device:CPU:0'.

    If d = '/gpu:0', default=None, it returns
    '/replica:0/task:0/device:GPU:0'.

  Note: This uses "job:localhost" as the default if executing eagerly.

  Args:
    d: a device string.
    default: a string for default device if d doesn't have all components.

  Returns:
    a canonicalized device string.
  """
  d = tf_device.DeviceSpec.from_string(d)
  assert d.device_type is None or d.device_type == d.device_type.upper(), (
      "Device type '%s' must be all-caps." % (d.device_type,))
  # Fill in missing device fields using defaults.
  result = tf_device.DeviceSpec(
      replica=0, task=0, device_type="CPU", device_index=0)
  if ops.executing_eagerly_outside_functions():
    result = result.replace(job="localhost")
  if default:
    result = result.make_merged_spec(
        tf_device.DeviceSpec.from_string(default))

  # Apply `d` last, so that it takes precedence over the defaults.
  result = result.make_merged_spec(d)
  return result.to_string()


def resolve(d):
  """Canonicalize `d` with the current device as default."""
  return canonicalize(d, default=current())


class _FakeNodeDef(object):
  """A fake NodeDef for _FakeOperation."""

  def __init__(self):
    self.op = ""
    self.name = ""


class _FakeOperation(object):
  """A fake Operation object to pass to device functions."""

  def __init__(self):
    self.device = ""
    self.type = ""
    self.name = ""
    self.node_def = _FakeNodeDef()

  def _set_device(self, device):
    self.device = ops._device_string(device)  # pylint: disable=protected-access

  def _set_device_from_string(self, device_str):
    self.device = device_str


def current():
  """Return a string (not canonicalized) for the current device."""
  # TODO(josh11b): Work out how this function interacts with ops.colocate_with.
if ops.executing_eagerly_outside_functions(): d = context.context().device_name else: op = _FakeOperation() ops.get_default_graph()._apply_device_functions(op) # pylint: disable=protected-access d = op.device return d def get_host_for_device(device): """Returns the corresponding host device for the given device.""" spec = tf_device.DeviceSpec.from_string(device) return tf_device.DeviceSpec( job=spec.job, replica=spec.replica, task=spec.task, device_type="CPU", device_index=0).to_string() def local_devices_from_num_gpus(num_gpus): """Returns device strings for local GPUs or CPU.""" return (tuple("/device:GPU:%d" % i for i in range(num_gpus)) or ("/device:CPU:0",))
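# For illustration, expected results of the helpers above (derived directly
# from their implementations):
#
#   get_host_for_device("/job:worker/replica:0/task:1/device:GPU:0")
#       -> "/job:worker/replica:0/task:1/device:CPU:0"
#   local_devices_from_num_gpus(2) -> ("/device:GPU:0", "/device:GPU:1")
#   local_devices_from_num_gpus(0) -> ("/device:CPU:0",)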
tensorflow-master
tensorflow/python/distribute/device_util.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test MirroredVariable in MirroredStrategy and MultiWorkerMirroredStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.ops import dataset_ops from tensorflow.python.distribute import collective_all_reduce_strategy from tensorflow.python.distribute import combinations from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import strategy_combinations from tensorflow.python.distribute import values from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.layers import core from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables def _replica_id(): replica_id = ds_context.get_replica_context().replica_id_in_sync_group if not isinstance(replica_id, ops.Tensor): replica_id = constant_op.constant(replica_id) return replica_id def _mimic_two_cpus(): cpus = config.list_physical_devices("CPU") config.set_virtual_device_configuration(cpus[0], [ context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration(), ]) @combinations.generate( combinations.combine( distribution=[ strategy_combinations.mirrored_strategy_with_gpu_and_cpu, combinations.NamedDistribution( "Collective2CPUs", # pylint: disable=g-long-lambda lambda: collective_all_reduce_strategy. CollectiveAllReduceStrategy._from_local_devices(( "/device:CPU:0", "/device:CPU:1")), required_gpus=0) ], mode=["graph", "eager"])) class MirroredVariableCreationTest(test.TestCase): """Base class that tests mirrored variable creator. Currently it assumes all strategy objects have two replicas. """ @classmethod def setUpClass(cls): _mimic_two_cpus() # TODO(priyag): Modify more tests to use this helper and check more # properties. 
def _test_mv_properties(self, var, name, strategy): self.assertIsInstance(var, values.MirroredVariable) self.assertEqual(name, var.name) self.assertIs(strategy, var.distribute_strategy) for d in var.devices: self.assertEqual(d, var.get(d).device) self.assertIs(strategy, var.get(d)._distribute_strategy) # pylint: disable=protected-access def testVariableInFuncGraph(self, distribution): def model_fn(): v = variable_scope.variable(2.0, name="bar") ds_context.get_replica_context().merge_call(lambda _: _) return v with func_graph.FuncGraph("fg").as_default(), distribution.scope(): v1 = variable_scope.variable(1.0, name="foo") v2 = distribution.extended.call_for_each_replica(model_fn) self._test_mv_properties(v1, "foo:0", distribution) self._test_mv_properties(v2, "bar:0", distribution) def testVariableWithTensorInitialValueInFunction(self, distribution): if not context.executing_eagerly(): self.skipTest("`tf.function` is an eager-only feature") v = [None] def model_fn(): if v[0] is None: init_val = array_ops.zeros([]) v[0] = variables.Variable(init_val) ds_context.get_replica_context().merge_call(lambda _: _) return v[0] @def_function.function(autograph=False) def make_v1(): return distribution.experimental_local_results( distribution.extended.call_for_each_replica(model_fn)) self.assertAllEqual([0, 0], make_v1()) def testSingleVariable(self, distribution): def model_fn(): # This variable should be created only once across the threads because of # special variable_creator functions used by # `distribution.extended.call_for_each_replica`. v = variable_scope.variable(1.0, name="foo") ds_context.get_replica_context().merge_call(lambda _: _) return v with distribution.scope(): result = distribution.extended.call_for_each_replica(model_fn) self._test_mv_properties(result, "foo:0", distribution) def testUnnamedVariable(self, distribution): def model_fn(): v = variable_scope.variable(1.0) ds_context.get_replica_context().merge_call(lambda _: _) return v with distribution.scope(): result = distribution.extended.call_for_each_replica(model_fn) self._test_mv_properties(result, "Variable:0", distribution) def testMultipleVariables(self, distribution): def model_fn(): vs = [] for i in range(5): vs.append(variable_scope.variable(1.0, name="foo" + str(i))) ds_context.get_replica_context().merge_call(lambda _: _) return vs with distribution.scope(): result = distribution.extended.call_for_each_replica(model_fn) for i, v in enumerate(result): self._test_mv_properties(v, "foo" + str(i) + ":0", distribution) def testMultipleVariablesWithSameCanonicalName(self, distribution): def model_fn(): vs = [] vs.append(variable_scope.variable(1.0, name="foo/bar")) vs.append(variable_scope.variable(1.0, name="foo_1/bar")) vs.append(variable_scope.variable(1.0, name="foo_1/bar_1")) vs.append(variable_scope.variable(1.0, name="foo/bar_1")) ds_context.get_replica_context().merge_call(lambda _: _) return vs with distribution.scope(): result = distribution.extended.call_for_each_replica(model_fn) for v in result: self.assertIsInstance(v, values.MirroredVariable) self.assertEqual(4, len(result)) self.assertEqual("foo/bar:0", result[0].name) self.assertEqual("foo_1/bar:0", result[1].name) self.assertEqual("foo_1/bar_1:0", result[2].name) self.assertEqual("foo/bar_1:0", result[3].name) def testVariableWithSameCanonicalNameAcrossThreads(self, distribution): def model_fn(): replica_id = self.evaluate(_replica_id()) v = variable_scope.variable(1.0, name="foo_" + str(replica_id)) ds_context.get_replica_context().merge_call(lambda _: _) 
      return v

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertIsInstance(result, values.MirroredVariable)
      # The resulting mirrored variable will use the name from the first
      # device.
      self.assertEqual("foo_0:0", result.name)

  def testWithLayers(self, distribution):

    def model_fn(features):
      with variable_scope.variable_scope("common"):
        layer1 = core.Dense(1)
        layer1(features)
        layer2 = core.Dense(1)
        layer2(features)
        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
        layer3 = core.Dense(1)
        layer3(features)
      return [(layer1.kernel, layer1.bias),
              (layer2.kernel, layer2.bias),
              (layer3.kernel, layer3.bias)]

    iterator = distribution.make_input_fn_iterator(
        lambda _: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10))
    self.evaluate(iterator.initialize())
    features = iterator.get_next()

    with distribution.scope():
      result = distribution.extended.call_for_each_replica(
          model_fn, args=(features,))
      suffixes = ["", "_1", "_2"]
      for (kernel, bias), suffix in zip(result, suffixes):
        self.assertIsInstance(kernel, values.MirroredVariable)
        self.assertEqual("common/dense" + suffix + "/kernel:0", kernel.name)
        self.assertIsInstance(bias, values.MirroredVariable)
        self.assertEqual("common/dense" + suffix + "/bias:0", bias.name)

  def testWithVariableAndVariableScope(self, distribution):

    def model_fn():
      v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
      with variable_scope.variable_scope("common"):
        v1 = variable_scope.variable(1.0, name="var1")
        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
        v2 = variable_scope.variable(
            1.0,
            name="var2",
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_scope.variable(
            1.0,
            name="var3",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3

    with distribution.scope():
      v = variable_scope.variable(1.0, name="var-main0")
      self.assertEqual("var-main0:0", v.name)

      result = distribution.extended.call_for_each_replica(model_fn)
      self.assertEqual(4, len(result))
      v0, v1, v2, v3 = result
      self.assertIsInstance(v0, values.MirroredVariable)
      self.assertEqual("var0:0", v0.name)
      self.assertIsInstance(v1, values.MirroredVariable)
      self.assertEqual("common/var1:0", v1.name)
      self.assertIsInstance(v2, values.SyncOnReadVariable)
      self.assertEqual("common/var2:0", v2.name)
      self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
      self.assertIsInstance(v3, values.MirroredVariable)
      self.assertEqual("common/var3:0", v3.name)
      self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)

  def testWithGetVariableAndVariableScope(self, distribution):

    def model_fn():
      v0 = variable_scope.get_variable("var0", [1])
      with variable_scope.variable_scope("common"):
        v1 = variable_scope.get_variable("var1", [1])
        # This will pause the current thread, and execute the other thread.
        ds_context.get_replica_context().merge_call(lambda _: _)
        v2 = variable_scope.get_variable(
            "var2", [1],
            synchronization=variable_scope.VariableSynchronization.ON_READ,
            aggregation=variable_scope.VariableAggregation.SUM)
        v3 = variable_scope.get_variable(
            "var3", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation=variable_scope.VariableAggregation.MEAN)
      return v0, v1, v2, v3

    with distribution.scope():
      with variable_scope.variable_scope("main"):
        v = variable_scope.get_variable("var-main0", [1])
        self.assertEqual("main/var-main0:0", v.name)

        result = distribution.extended.call_for_each_replica(model_fn)
        self.assertEqual(4, len(result))
        v0, v1, v2, v3 = result
        self.assertIsInstance(v0, values.MirroredVariable)
        self.assertEqual("main/var0:0", v0.name)
        self.assertIsInstance(v1, values.MirroredVariable)
        self.assertEqual("main/common/var1:0", v1.name)
        self.assertIsInstance(v2, values.SyncOnReadVariable)
        self.assertEqual("main/common/var2:0", v2.name)
        self.assertEqual(variable_scope.VariableAggregation.SUM,
                         v2.aggregation)
        self.assertIsInstance(v3, values.MirroredVariable)
        self.assertEqual("main/common/var3:0", v3.name)
        self.assertEqual(variable_scope.VariableAggregation.MEAN,
                         v3.aggregation)

  def testOnlyFirstReplicaUpdatesVariables(self, distribution):

    def create_fn():
      aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
      v0 = variable_scope.variable(
          2.0,
          name="on_read",
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=aggregation)
      v1 = variable_scope.variable(
          3.0,
          name="on_write",
          synchronization=variable_scope.VariableSynchronization.ON_WRITE,
          aggregation=aggregation)
      return v0, v1

    devices = distribution.extended.worker_devices
    with distribution.scope():
      v0, v1 = distribution.extended.call_for_each_replica(create_fn)
      self.evaluate(v0.initializer)
      self.assertEqual(2.0, self.evaluate(v0.get(devices[0])))
      self.assertEqual(2.0, self.evaluate(v0.get(devices[1])))
      self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))
      self.evaluate(v1.initializer)
      self.assertEqual(3.0, self.evaluate(v1.get(devices[0])))
      self.assertEqual(3.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))

      def replica_id_plus_one():
        return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)

      # Update using the assign_add member function.
      def update_member_fn():
        update0 = v0.assign_add(5.0 * replica_id_plus_one())
        update1 = v1.assign_add(7.0 * replica_id_plus_one())
        return update0, update1

      update0a, update1a = distribution.extended.call_for_each_replica(
          update_member_fn)

      # Update "sync on read" variable.
      self.evaluate(distribution.group(update0a))
      self.assertEqual(2.0 + 5.0, self.evaluate(v0.get(devices[0])))
      # Writes are not synchronized for "sync on read" variables,
      # so device[1] can end up with a different value.
      self.assertEqual(2.0 + 2 * 5.0, self.evaluate(v0.get(devices[1])))
      # Always reads from device 0.
      self.assertEqual(2.0 + 5.0,
                       self.evaluate(distribution.extended.read_var(v0)))

      # Update "sync on write" variable.
      self.evaluate(distribution.group(update1a))
      self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[0])))
      # Writes are synchronized for v1, only the argument to assign_add on
      # device[0] is used.
      self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0 + 7.0,
                       self.evaluate(distribution.extended.read_var(v1)))

      # Update using the state_ops.assign_add global function.
      def update_state_ops_fn():
        update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())
        update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())
        return update0, update1

      update0b, update1b = distribution.extended.call_for_each_replica(
          update_state_ops_fn)
      self.evaluate(distribution.group(update0b))

      # Update "sync on read" variable.
      self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(v0.get(devices[0])))
      self.assertEqual(2.0 + 2 * 5.0 + 2 * 11.0,
                       self.evaluate(v0.get(devices[1])))
      self.assertEqual(2.0 + 5.0 + 11.0,
                       self.evaluate(distribution.extended.read_var(v0)))

      # Update "sync on write" variable.
      self.evaluate(distribution.group(update1b))
      self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[0])))
      self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[1])))
      self.assertEqual(3.0 + 7.0 + 13.0,
                       self.evaluate(distribution.extended.read_var(v1)))

  def testNoneSynchronizationWithGetVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegexp(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with `Mirrored` distribution strategy. Please change "
          "the `synchronization` for variable: v"):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.NONE)

  def testNoneSynchronizationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegexp(
          ValueError, "`NONE` variable synchronization mode is not "
          "supported with `Mirrored` distribution strategy. Please change "
          "the `synchronization` for variable: v"):
        variable_scope.variable(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.NONE)

  def testInvalidSynchronizationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable synchronization mode: Invalid for "
          "variable: v"):
        variable_scope.variable(1.0, name="v", synchronization="Invalid")

  def testInvalidAggregationWithGetVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_scope.get_variable(
            "v", [1],
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")

  def testInvalidAggregationWithVariable(self, distribution):
    with distribution.scope():
      with self.assertRaisesRegexp(
          ValueError, "Invalid variable aggregation mode: invalid for "
          "variable: v"):
        variable_scope.variable(
            1.0,
            name="v",
            synchronization=variable_scope.VariableSynchronization.ON_WRITE,
            aggregation="invalid")

  def testNonMatchingVariableCreation(self, distribution):
    self.skipTest("b/123075960")

    def model_fn(name):
      v = variable_scope.variable(1.0, name=name)
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
      device_map = values.ReplicaDeviceMap(
          distribution.extended.worker_devices)
      names = values.DistributedValues(device_map, ("foo", "bar"))
      with self.assertRaises(RuntimeError):
        _ = distribution.extended.call_for_each_replica(
            model_fn, args=(names,))

  def testSyncOnReadVariable(self, distribution):
    all_v_sum = {}
    all_v_mean = {}
    components_sum = {}
    components_mean = {}

    def model_fn():
      replica_id = self.evaluate(_replica_id())
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      v_mean = variable_scope.variable(
          4.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.MEAN)
      self.assertIsInstance(v_sum, values.SyncOnReadVariable)
      self.assertIsInstance(v_mean, values.SyncOnReadVariable)
      updates = [
          v_sum.assign_add(2.0 + replica_id),
          v_mean.assign(6.0 * replica_id)
      ]
      all_v_sum[replica_id] = v_sum
      all_v_mean[replica_id] = v_mean

      c_sum = v_sum.get()
      c_mean = v_mean.get()
      components_sum[replica_id] = c_sum
      components_mean[replica_id] = c_mean
      self.assertIsNot(v_sum, c_sum)
      self.assertIsNot(v_mean, c_mean)
      return updates, v_sum, v_mean, c_sum, c_mean

    with distribution.scope():
      # Create "sum" and "mean" versions of SyncOnReadVariables.
      ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
          distribution.extended.call_for_each_replica(model_fn))
      # Should see the same wrapping instance in all replicas.
      self.assertIs(all_v_sum[0], ret_v_sum)
      self.assertIs(all_v_mean[0], ret_v_mean)
      self.assertIs(all_v_sum[0], all_v_sum[1])
      self.assertIs(all_v_mean[0], all_v_mean[1])

      # Regroup should recover the same wrapper.
      self.assertIs(ret_v_sum, regrouped_sum)
      self.assertIs(ret_v_mean, regrouped_mean)
      self.assertIsNot(components_sum[0], components_sum[1])
      self.assertIsNot(components_mean[0], components_mean[1])

      # Apply updates.
      self.evaluate(variables.global_variables_initializer())
      self.evaluate([
          y for x in ret_ops  # pylint: disable=g-complex-comprehension
          for y in distribution.experimental_local_results(x)
      ])
      expected_sum = 0.0
      expected_mean = 0.0
      for i, d in enumerate(distribution.extended.worker_devices):
        # Should see different values on different devices.
        v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
        v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
        expected = i + 3.0
        self.assertEqual(expected, v_sum_value)
        expected_sum += expected
        expected = i * 6.0
        self.assertEqual(expected, v_mean_value)
        expected_mean += expected
      expected_mean /= len(distribution.extended.worker_devices)

      # Without get(device), should return the value you get by
      # applying the reduction across all replicas (whether you use
      # read_var(), get(), or nothing).
      self.assertEqual(
          expected_sum,
          self.evaluate(distribution.extended.read_var(ret_v_sum)))
      self.assertEqual(
          expected_mean,
          self.evaluate(distribution.extended.read_var(ret_v_mean)))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
      self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
      self.assertEqual(expected_mean, self.evaluate(ret_v_mean))

  # TODO(priyag): Update this test to work in eager mode as well.
  def testDynamicRnnVariables(self, distribution):

    def model_fn():
      inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
      cell_fw = rnn_cell_impl.LSTMCell(300)
      cell_bw = rnn_cell_impl.LSTMCell(300)
      (outputs, _) = rnn.bidirectional_dynamic_rnn(
          cell_fw, cell_bw, inputs, dtype=dtypes.float32)
      return outputs

    with context.graph_mode(), distribution.scope():
      result = distribution.extended.call_for_each_replica(model_fn)
      # Two variables are created by the RNN layer.
      self.assertEqual(2, len(result))
      for v in result:
        self.assertIsInstance(v, values.DistributedValues)
        _, v1 = distribution.experimental_local_results(v)
        self.assertStartsWith(v1._op.name, "replica_1/")

  def testSyncOnReadVariableUpdate(self, distribution):

    def model_fn():
      v_sum = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ,
          aggregation=variable_scope.VariableAggregation.SUM)
      self.assertIsInstance(v_sum, values.SyncOnReadVariable)
      return v_sum

    def update(var, value):
      return var.assign(value)

    with distribution.scope():
      ret_v_sum = distribution.extended.call_for_each_replica(model_fn)

      # Initialize variables.
      self.evaluate(variables.global_variables_initializer())
      # Assert that the aggregated value of the sync on read var is the sum
      # of the individual values before running the update ops.
      self.assertEqual(
          1.0,
          self.evaluate(
              ret_v_sum.get(
                  distribution.extended.worker_devices[0]).read_value()))
      self.assertEqual(2.0, self.evaluate(ret_v_sum))

      # Apply updates.
      update_ops = distribution.extended.update(
          ret_v_sum, update, args=(5.0,), group=False)
      self.evaluate(update_ops)
      # Assert that the aggregated value of the sync on read vars is the sum
      # of the individual values after running the update ops.
      self.assertEqual(
          5.0,
          self.evaluate(
              ret_v_sum.get(
                  distribution.extended.worker_devices[0]).read_value()))
      self.assertEqual(10.0, self.evaluate(ret_v_sum))

  def testVarDistributeStrategy(self, distribution):
    with distribution.scope():
      mirrored = variable_scope.variable(1.0)
      sync_on_read = variable_scope.variable(
          1.0,
          synchronization=variable_scope.VariableSynchronization.ON_READ)
      self.assertIs(distribution, mirrored.distribute_strategy)
      self.assertIs(distribution, sync_on_read.distribute_strategy)


if __name__ == "__main__":
  test.main()
tensorflow-master
tensorflow/python/distribute/mirrored_variable_test.py
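Editor's note: the test file above exercises variable creation under MirroredStrategy. A minimal hedged sketch of the same semantics via the public TF 2.x API follows; it is not part of the corpus row, and the initial values and names are illustrative assumptions.

# Variables created inside a MirroredStrategy scope become mirrored; ON_READ
# variables may diverge per replica and aggregate when read.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  # ON_WRITE (the default) keeps every replica's copy in sync on each write.
  mirrored = tf.Variable(1.0, name="mirrored")
  # ON_READ lets replica copies diverge between reads; reading applies the
  # declared aggregation (here SUM) across replicas.
  sync_on_read = tf.Variable(
      2.0,
      name="sync_on_read",
      synchronization=tf.VariableSynchronization.ON_READ,
      aggregation=tf.VariableAggregation.SUM)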
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import os

from tensorflow.compiler.tests import xla_test
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.platform import test
from tensorflow.python.training import adam as adam_v1
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util as trackable_utils


class NonLayerTrackable(tracking.AutoTrackable):

  def __init__(self):
    super(NonLayerTrackable, self).__init__()
    self.a_variable = trackable_utils.add_variable(
        self, name="a_variable", shape=[])


class Subclassed(training.Model):
  """A concrete Model for testing."""

  def __init__(self):
    super(Subclassed, self).__init__()
    self._named_dense = core.Dense(1, use_bias=True)
    self._second = core.Dense(1, use_bias=False)
    # We can still track Trackables which aren't Layers.
    self._non_layer = NonLayerTrackable()

  def call(self, values):
    ret = self._second(self._named_dense(values))
    return ret


class TrainingCheckpointTests(xla_test.XLATestCase):

  def testEagerTPUDistributionStrategy(self):
    self.skipTest("b/121387144")
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")

    def _train_fn(optimizer, model):
      input_value = constant_op.constant([[3.]])
      optimizer.minimize(
          functools.partial(model, input_value),
          global_step=root.optimizer_step)

    for training_continuation in range(3):
      strategy = tpu_strategy.TPUStrategy()
      with strategy.scope():
        model = Subclassed()
        optimizer = adam_v1.AdamOptimizer(0.001)
        root = trackable_utils.Checkpoint(
            optimizer=optimizer, model=model,
            optimizer_step=training_util.get_or_create_global_step())
        root.restore(checkpoint_management.latest_checkpoint(
            checkpoint_directory))

        for _ in range(num_training_steps):
          strategy.extended.call_for_each_replica(
              functools.partial(_train_fn, optimizer, model))
        root.save(file_prefix=checkpoint_prefix)
        self.assertEqual((training_continuation + 1) * num_training_steps,
                         root.optimizer_step.numpy())


if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
tensorflow-master
tensorflow/python/distribute/checkpointing_test.py
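Editor's note: the test above restores the latest checkpoint and asserts the step counter keeps advancing across runs. A hedged sketch of that save/restore pattern with the public `tf.train.Checkpoint` API follows; the directory path and model are illustrative assumptions, not taken from the test.

import os
import tensorflow as tf

ckpt_dir = "/tmp/ckpt_demo"  # hypothetical directory, for illustration only
step = tf.Variable(0, dtype=tf.int64)  # counts completed training steps
model = tf.keras.layers.Dense(1)
optimizer = tf.keras.optimizers.Adam(0.001)
checkpoint = tf.train.Checkpoint(step=step, model=model, optimizer=optimizer)

# Restoring the latest checkpoint (a no-op on the first run, since
# latest_checkpoint returns None) lets a new process resume counting where
# the previous run stopped -- the property the test asserts.
checkpoint.restore(tf.train.latest_checkpoint(ckpt_dir))
step.assign_add(1)  # stand-in for a real training step
checkpoint.save(os.path.join(ckpt_dir, "ckpt"))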
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input-pipeline utilities for Distribution strategies."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import traverse
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops

# pylint: disable=protected-access


def auto_shard_dataset(dataset, num_shards, index):
  """Shard the input pipeline by sharding the underlying list of files.

  Args:
    dataset: A `tf.data.Dataset` instance, typically the result of a series
      of dataset transformations.
    num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
      shards operating in parallel. Same usage as in `tf.data.Dataset.shard`.
    index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
      Same usage as in `tf.data.Dataset.shard`.

  Returns:
    A modified `Dataset` whose underlying list of files is sharded across
    workers. The input dataset is returned unchanged if we cannot
    automatically determine a good way to shard it.
  """
  if dataset.options().experimental_distribute.auto_shard:
    if isinstance(dataset, dataset_ops.DatasetV1):
      return distribute._AutoShardDatasetV1(dataset, num_shards, index)
    else:
      return distribute._AutoShardDataset(dataset, num_shards, index)
  else:
    return dataset


def _clone_dataset(dataset):
  """Returns a cloned version of `dataset`."""
  variant_tensor_ops = traverse.obtain_all_variant_tensor_ops(dataset)
  remap_dict = _clone_helper(dataset._variant_tensor.op, variant_tensor_ops)
  new_variant_tensor = remap_dict[dataset._variant_tensor.op].outputs[0]
  return dataset_ops._VariantDataset(new_variant_tensor,
                                     dataset._element_structure)


def _get_op_def(op):
  return op.op_def or op_def_registry.get_registered_ops()[op.type]


def _clone_helper(op_to_clone, variant_tensor_ops):
  """Helper method that recursively clones `op_to_clone`.

  Args:
    op_to_clone: The op we want to clone.
    variant_tensor_ops: A list of ops that we have to clone along the way.

  Returns:
    A dictionary mapping each original op to its newly created clone.
    Includes `op_to_clone` as a key.
  """
  remap_dict = {}
  # First clone any variant-tensor inputs, recording old -> new mappings.
  for input_tensor in op_to_clone.inputs:
    input_tensor_op = input_tensor.op
    if input_tensor_op in variant_tensor_ops:
      recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)
      remap_dict.update(recursive_map)
  # Then rebuild the input list, substituting cloned ops where available.
  inputs_list = []
  for input_tensor in op_to_clone.inputs:
    input_tensor_op = input_tensor.op
    if input_tensor_op in remap_dict:
      remapped_input = remap_dict[input_tensor_op].outputs[0]
      inputs_list.append(remapped_input)
    else:
      inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])
  g = ops.get_default_graph()
  new_op = g.create_op(
      op_to_clone.type,
      inputs_list,
      [o.dtype for o in op_to_clone.outputs],
      name=op_to_clone.name,
      attrs=op_to_clone.node_def.attr,
      op_def=_get_op_def(op_to_clone))
  remap_dict[op_to_clone] = new_op
  return remap_dict
tensorflow-master
tensorflow/python/distribute/input_ops.py
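Editor's note: a hedged usage sketch for `auto_shard_dataset` from the file above. The file pattern and shard parameters are illustrative assumptions, the import path is the internal module shown in this row rather than a public API, and sharding only occurs when auto-sharding is enabled in the dataset options.

import tensorflow as tf
from tensorflow.python.distribute import input_ops  # internal module above

# Build a file-based pipeline; "data-*.tfrecord" is a hypothetical pattern.
files = tf.data.Dataset.list_files("/path/to/data-*.tfrecord", shuffle=False)
dataset = tf.data.TFRecordDataset(files)

# Worker 0 of 4: with auto-sharding enabled in the dataset options, each
# worker's pipeline reads a disjoint subset of the underlying files;
# otherwise the dataset is returned unchanged.
sharded = input_ops.auto_shard_dataset(dataset, num_shards=4, index=0)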