Dataset columns:
  python_code  (string, lengths 0 - 679k)
  repo_name    (string, lengths 9 - 41)
  file_path    (string, lengths 6 - 149)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """Tests for python.tpu.feature_column.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.client import session from tensorflow.python.feature_column import feature_column_lib as fc_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.tpu import feature_column_v2 as tpu_fc def _initialized_session(): sess = session.Session() sess.run(variables_lib.global_variables_initializer()) sess.run(lookup_ops.tables_initializer()) return sess class EmbeddingColumnTestV2(test.TestCase): def test_defaults(self): categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = tpu_fc.embedding_column_v2( categorical_column, dimension=embedding_dimension) # Can't test default initializer as it's a random function. self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('mean', embedding_column.combiner) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual((embedding_dimension,), embedding_column.variable_shape) def test_all_constructor_args(self): categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = tpu_fc.embedding_column_v2( categorical_column, dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer') self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('my_initializer', embedding_column.initializer()) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual((embedding_dimension,), embedding_column.variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column._parse_example_spec) @test_util.deprecated_graph_mode_only def test_feature_layer_cpu(self): # Inputs. vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 1), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 2)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) 
# id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) expected_lookups_sequence = ( # example 0, ids [2], embedding = [[7, 11], [0, 0]] ((7., 11.), (0., 0.),), # example 1, ids [0, 1], embedding = [[1, 2], [3. 5]] ((1., 2.), (3., 5.),), # example 2, ids [], embedding = [0, 0] ((0., 0.), (0., 0.),), # example 3, ids [1], embedding = [3, 5] ((3., 5.), (0., 0.),), ) # Build columns. categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) sequence_categorical_column = ( fc_lib.sequence_categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size)) embedding_column = tpu_fc.embedding_column_v2( categorical_column, dimension=embedding_dimension, initializer=_initializer) sequence_embedding_column = tpu_fc.embedding_column_v2( sequence_categorical_column, dimension=embedding_dimension, initializer=_initializer, max_sequence_length=2) # Provide sparse input and get dense result. features = {'aaa': sparse_input, 'bbb': sparse_input} dense_features = fc_lib.DenseFeatures([embedding_column]) sequence_features = fc_lib.SequenceFeatures([sequence_embedding_column]) embedding_lookup = dense_features(features) sequence_embedding_lookup = sequence_features(features) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual( ('dense_features/aaa_embedding/embedding_weights:0', 'sequence_features/bbb_embedding/embedding_weights:0',), tuple([v.name for v in global_vars])) with _initialized_session(): self.assertAllEqual(embedding_values, global_vars[0].eval()) self.assertAllEqual(expected_lookups, embedding_lookup.eval()) self.assertAllEqual(expected_lookups_sequence, sequence_embedding_lookup[0].eval()) class SharedEmbeddingColumnTestV2(test.TestCase): @test_util.deprecated_graph_mode_only def test_defaults(self): vocabulary_size = 3 categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_dimension = 2 embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns_v2( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual((vocabulary_size, embedding_dimension), embedding_column_a.get_embedding_table_size()) self.assertEqual((vocabulary_size, embedding_dimension), embedding_column_a.get_embedding_table_size()) self.assertEqual('mean', embedding_column_a.combiner) self.assertEqual('mean', embedding_column_b.combiner) self.assertIsNotNone(embedding_column_a.get_initializer()) self.assertIsNotNone(embedding_column_b.get_initializer()) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_a.get_embedding_var_name()) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_b.get_embedding_var_name()) self.assertEqual('aaa_shared_embedding', 
embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) @test_util.deprecated_graph_mode_only def test_all_constructor_args(self): vocabulary_size = 3 categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_dimension = 2 embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='var_scope_name') self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual((vocabulary_size, embedding_dimension), embedding_column_a.get_embedding_table_size()) self.assertEqual((vocabulary_size, embedding_dimension), embedding_column_a.get_embedding_table_size()) self.assertEqual('my_combiner', embedding_column_a.combiner) self.assertEqual('my_combiner', embedding_column_b.combiner) self.assertEqual('my_initializer', embedding_column_a.get_initializer()()) self.assertEqual('my_initializer', embedding_column_b.get_initializer()()) self.assertEqual('var_scope_name', embedding_column_a.get_embedding_var_name()) self.assertEqual('var_scope_name', embedding_column_b.get_embedding_var_name()) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape) @test_util.deprecated_graph_mode_only def test_feature_layer_cpu(self): # Inputs. vocabulary_size = 3 input_a = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(2, 2)) input_b = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] indices=((0, 0), (1, 0), (1, 1)), values=(2, 0, 1), dense_shape=(3, 2)) input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info=None): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups_a = ( # example 0: (7., 11.), # ids [2], embedding = [7, 11] # example 1: (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] ) expected_lookups_b = ( # example 0: ((7., 11.), (0., 0.),), # ids [2], embedding = [[7, 11], [0, 0]] # example 1: ((1., 2.), (3., 5.),), # ids [0, 1], embedding = [[1, 2], [3, 5]] # example 2: ((0., 0.), (0., 0.),), # ids [], embedding = [[0, 0], [0, 0]] ) # Build columns. 
categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc_lib.sequence_categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer, max_sequence_lengths=[0, 2]) # Provide sparse input and get dense result. dense_features = fc_lib.DenseFeatures([embedding_column_a]) sequence_features = fc_lib.SequenceFeatures([embedding_column_b]) embedding_lookup_a = dense_features(input_features) embedding_lookup_b = sequence_features(input_features) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual( ('aaa_bbb_shared_embedding:0',), tuple([v.name for v in global_vars])) embedding_var = global_vars[0] with _initialized_session(): self.assertAllEqual(embedding_values, embedding_var.eval()) self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval()) self.assertAllEqual(expected_lookups_b, embedding_lookup_b[0].eval()) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/feature_column_v2_test.py
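Not part of the dataset row above: a minimal sketch of the construction pattern that feature_column_v2_test.py exercises. The key name, bucket count, and embedding dimension are placeholder assumptions; wiring the column into an input pipeline is out of scope here.

# Hedged sketch of the API exercised by the tests above; key, bucket count,
# and dimension are placeholder values, not taken from any real model.
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.tpu import feature_column_v2 as tpu_fc

categorical = fc_lib.categorical_column_with_identity(key='aaa', num_buckets=3)
embedding = tpu_fc.embedding_column_v2(categorical, dimension=2)

print(embedding.name)            # 'aaa_embedding' (asserted in test_defaults)
print(embedding.variable_shape)  # (2,)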
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================================== """Tests for python.tpu.feature_column.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.client import session from tensorflow.python.feature_column import feature_column as fc from tensorflow.python.feature_column import feature_column_lib as fc_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import test from tensorflow.python.tpu import feature_column as tpu_fc def _initialized_session(): sess = session.Session() sess.run(variables_lib.global_variables_initializer()) sess.run(lookup_ops.tables_initializer()) return sess class EmbeddingColumnTest(test.TestCase): def test_defaults(self): categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = tpu_fc.embedding_column( categorical_column, dimension=embedding_dimension) self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('mean', embedding_column.combiner) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual('aaa_embedding', embedding_column._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column._parse_example_spec) def test_all_constructor_args(self): categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) embedding_dimension = 2 embedding_column = tpu_fc.embedding_column( categorical_column, dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer') self.assertIs(categorical_column, embedding_column.categorical_column) self.assertEqual(embedding_dimension, embedding_column.dimension) self.assertEqual('my_combiner', embedding_column.combiner) self.assertEqual('aaa_embedding', embedding_column.name) self.assertEqual('aaa_embedding', embedding_column._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column._parse_example_spec) @test_util.deprecated_graph_mode_only def test_get_dense_tensor(self): # Inputs. 
vocabulary_size = 3 sparse_input = sparse_tensor.SparseTensorValue( # example 0, ids [2] # example 1, ids [0, 1] # example 2, ids [] # example 3, ids [1] indices=((0, 0), (1, 0), (1, 4), (3, 0)), values=(2, 0, 1, 1), dense_shape=(4, 5)) # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups = ( # example 0, ids [2], embedding = [7, 11] (7., 11.), # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] (2., 3.5), # example 2, ids [], embedding = [0, 0] (0., 0.), # example 3, ids [1], embedding = [3, 5] (3., 5.), ) # Build columns. categorical_column = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) embedding_column = tpu_fc.embedding_column( categorical_column, dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup = embedding_column._get_dense_tensor( fc._LazyBuilder({ 'aaa': sparse_input })) # Assert expected embedding variable and lookups. global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) with _initialized_session(): self.assertAllEqual(embedding_values, global_vars[0].eval()) self.assertAllEqual(expected_lookups, embedding_lookup.eval()) class SharedEmbeddingColumnTest(test.TestCase): @test_util.deprecated_graph_mode_only def test_defaults(self): categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns( [categorical_column_b, categorical_column_a], dimension=embedding_dimension) self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(embedding_dimension, embedding_column_a.dimension) self.assertEqual(embedding_dimension, embedding_column_b.dimension) self.assertEqual('mean', embedding_column_a.combiner) self.assertEqual('mean', embedding_column_b.combiner) self.assertIsNotNone(embedding_column_a.initializer) self.assertIsNotNone(embedding_column_b.initializer) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_a.shared_embedding_collection_name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_b.shared_embedding_collection_name) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_a._var_scope_name) self.assertEqual('aaa_bbb_shared_embedding', embedding_column_b._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a._parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b._parse_example_spec) @test_util.deprecated_graph_mode_only def test_all_constructor_args(self): categorical_column_a = 
fc_lib.categorical_column_with_identity( key='aaa', num_buckets=3) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=3) embedding_dimension = 2 embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, combiner='my_combiner', initializer=lambda: 'my_initializer', shared_embedding_collection_name='var_scope_name') self.assertIs(categorical_column_a, embedding_column_a.categorical_column) self.assertIs(categorical_column_b, embedding_column_b.categorical_column) self.assertEqual(embedding_dimension, embedding_column_a.dimension) self.assertEqual(embedding_dimension, embedding_column_b.dimension) self.assertEqual('my_combiner', embedding_column_a.combiner) self.assertEqual('my_combiner', embedding_column_b.combiner) self.assertEqual('my_initializer', embedding_column_a.initializer()) self.assertEqual('my_initializer', embedding_column_b.initializer()) self.assertEqual('var_scope_name', embedding_column_a.shared_embedding_collection_name) self.assertEqual('var_scope_name', embedding_column_b.shared_embedding_collection_name) self.assertEqual('aaa_shared_embedding', embedding_column_a.name) self.assertEqual('bbb_shared_embedding', embedding_column_b.name) self.assertEqual('var_scope_name', embedding_column_a._var_scope_name) self.assertEqual('var_scope_name', embedding_column_b._var_scope_name) self.assertEqual((embedding_dimension,), embedding_column_a._variable_shape) self.assertEqual((embedding_dimension,), embedding_column_b._variable_shape) self.assertEqual({ 'aaa': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_a._parse_example_spec) self.assertEqual({ 'bbb': parsing_ops.VarLenFeature(dtypes.int64) }, embedding_column_b._parse_example_spec) @test_util.deprecated_graph_mode_only def test_get_dense_tensor(self): # Inputs. vocabulary_size = 3 # -1 values are ignored. input_a = np.array([ [2, -1, -1], # example 0, ids [2] [0, 1, -1] ]) # example 1, ids [0, 1] input_b = np.array([ [0, -1, -1], # example 0, ids [0] [-1, -1, -1] ]) # example 1, ids [] input_features = {'aaa': input_a, 'bbb': input_b} # Embedding variable. embedding_dimension = 2 embedding_values = ( (1., 2.), # id 0 (3., 5.), # id 1 (7., 11.) # id 2 ) def _initializer(shape, dtype, partition_info): self.assertAllEqual((vocabulary_size, embedding_dimension), shape) self.assertEqual(dtypes.float32, dtype) self.assertIsNone(partition_info) return embedding_values # Expected lookup result, using combiner='mean'. expected_lookups_a = ( # example 0: (7., 11.), # ids [2], embedding = [7, 11] # example 1: (2., 3.5), # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5] ) expected_lookups_b = ( # example 0: (1., 2.), # ids [0], embedding = [1, 2] # example 1: (0., 0.), # ids [], embedding = [0, 0] ) # Build columns. categorical_column_a = fc_lib.categorical_column_with_identity( key='aaa', num_buckets=vocabulary_size) categorical_column_b = fc_lib.categorical_column_with_identity( key='bbb', num_buckets=vocabulary_size) embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns( [categorical_column_a, categorical_column_b], dimension=embedding_dimension, initializer=_initializer) # Provide sparse input and get dense result. embedding_lookup_a = embedding_column_a._get_dense_tensor( fc._LazyBuilder(input_features)) embedding_lookup_b = embedding_column_b._get_dense_tensor( fc._LazyBuilder(input_features)) # Assert expected embedding variable and lookups. 
global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES) self.assertItemsEqual(('embedding_weights:0',), tuple([v.name for v in global_vars])) embedding_var = global_vars[0] with _initialized_session(): self.assertAllEqual(embedding_values, embedding_var.eval()) self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval()) self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval()) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/feature_column_test.py
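Not part of the dataset row above: a companion sketch for the v1 column factory tested in feature_column_test.py, showing shared_embedding_columns over two placeholder categorical columns.

# Hedged sketch of tpu_fc.shared_embedding_columns from the tests above;
# keys, bucket counts, and dimension are placeholder assumptions.
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.tpu import feature_column as tpu_fc

col_a = fc_lib.categorical_column_with_identity(key='aaa', num_buckets=3)
col_b = fc_lib.categorical_column_with_identity(key='bbb', num_buckets=3)
emb_a, emb_b = tpu_fc.shared_embedding_columns([col_a, col_b], dimension=2)

print(emb_a.name)  # 'aaa_shared_embedding' (asserted in test_defaults)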
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental TPU library."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=unused-import
from tensorflow.python.tpu import tpu_strategy_util
# pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/experimental/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Operations for TPUs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops # pylint: disable=wildcard-import,unused-import from tensorflow.python.ops import gen_tpu_ops from tensorflow.python.ops.gen_tpu_ops import * # pylint: enable=wildcard-import,unused-import from tensorflow.python.platform import tf_logging as logging from tensorflow.python.tpu import tpu_function from tensorflow.python.util.tf_export import tf_export def _create_default_group_assignment(): num_shards = tpu_function.get_tpu_context().number_of_shards if num_shards is None: logging.warning( "cross_replica_sum should be used within a tpu_shard_context, but " "got unset number_of_shards. Assuming 1.") num_shards = 1 group_assignment = [list(range(num_shards))] return group_assignment def all_to_all(x, concat_dimension, split_dimension, split_count, group_assignment=None, name=None): """Exchange data across TPU replicas. Args: x: The local tensor. concat_dimension: The dimension number to concatenate. split_dimension: The dimension number to split. split_count: The number of splits, this number must equal to the sub-group size(group_assignment.get_shape()[1]) group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup. name: Optional op name. Returns: A `Tensor` which is concatenated by data from different replicas. """ if group_assignment is None: group_assignment = _create_default_group_assignment() return gen_tpu_ops.all_to_all( x, group_assignment, concat_dimension=concat_dimension, split_dimension=split_dimension, split_count=split_count, name=name) @ops.RegisterGradient("AllToAll") def _all_to_all_grad(op, grad): # The gradient of a all-to-all is also a all-to-all but the # split_dimension and concat_dimension is swapped. # The graident with respect to group_assignment is None. return [ gen_tpu_ops.all_to_all( grad, op.inputs[1], concat_dimension=op.get_attr("split_dimension"), split_dimension=op.get_attr("concat_dimension"), split_count=op.get_attr("split_count")), None ] @tf_export(v1=["tpu.cross_replica_sum"]) def cross_replica_sum(x, group_assignment=None, name=None): """Sum the input tensor across replicas according to group_assignment. Args: x: The local tensor to the sum. group_assignment: Optional 2d int32 lists with shape [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the replica ids in the ith subgroup. name: Optional op name. Returns: A `Tensor` which is summed across replicas. 
""" if group_assignment is None: group_assignment = _create_default_group_assignment() return gen_tpu_ops.cross_replica_sum(x, group_assignment, name=name) def collective_permute(x, source_target_pairs, name=None): """Permute the input tensor across replicas given source_target_pairs. For each source_target_pair <a, b>, we send replica a's input to replica b. Each replica id must only appear once in the source column. Also it must only appear once in the target column. For the replica id not in the target column, this op returns a zero tensor with the same shape and dtype of the input x. For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing source_target_pairs=`[[0,1],[1,2],[2,3]]` gets the outputs: `[0, A, B, C]`. Args: x: The local tensor to be permuted. source_target_pairs: 2d int lists with shape [num_pairs, 2]. source_target_pairs[i][0] represents the source replica id and source_target_pairs[i][1] represents the target replica id. name: Optional op name. Returns: A `Tensor` which is permuted. """ return gen_tpu_ops.collective_permute(x, source_target_pairs, name=name) @ops.RegisterGradient("CollectivePermute") def _collective_permute_grad(op, grad): # The gradient of a collective permute operation is also a collective # permute, but with source/target pairs reversed. The gradient with respect # to input argument `source_target_pairs` is `None`. source_target_pairs = op.inputs[1][:, ::-1] return [gen_tpu_ops.collective_permute(grad, source_target_pairs), None] @ops.RegisterGradient("CrossReplicaSum") def _cross_replica_sum_grad(op, grad): # The gradient of a cross replica sum is also a cross-replica sum. # The gradient with respect to group_assignment is None. return [gen_tpu_ops.cross_replica_sum(grad, op.inputs[1]), None] # This extra type checking exists to give a more helpful error message in # the common case that uint8 and int64 values are infed. Remove when both # types are supported. _SUPPORTED_INFEED_DTYPES = set([ dtypes.bool, dtypes.int32, dtypes.int64, dtypes.bfloat16, dtypes.float32, dtypes.complex64, dtypes.uint32 ]) @ops.RegisterGradient("TPUEmbeddingActivations") def _embedding_activations_grad(activations_op, grad_wrt_activations): """Saves the gradient of embedding activations ops in a graph collection.""" g = ops.get_default_graph() table_id = activations_op.get_attr("table_id") lookup_id = activations_op.get_attr("lookup_id") table_gradients = g.get_collection_ref( "tpu_embedding_gradients_table_%d" % table_id) if not table_gradients: raise RuntimeError( "Gradients for TPUEmbedding have been generated in non-training mode." "This is not expected. Consider putting your Optimizer.minimize code " "behind the training mode condition check. For Estimator, you can " "do \n\n" " if mode == tf.estimator.ModeKeys.TRAIN:\n" " train_op = opt.minimize(loss)\n" "\n") table_gradients[lookup_id] = array_ops.identity(grad_wrt_activations) return [ # RegisterGradient requires that value be returned for all inputs. Since # the first argument (tpu_gradient_variable_{table_name}) has shape [1], # we will return zeros(shape=[1]). The actual gradient w.r.t. the # embedding activations (grad_wrt_activations) has the same shape as the # activations returned by embedding_activations. array_ops.zeros(arg.shape, dtype=dtypes.float32) for arg in activations_op.inputs ] def infeed_dequeue(dtype, shape, name=None): """A placeholder op for a value that will be fed into the computation. Args: dtype: A `tf.DType`. The type of elements in the tensor. 
shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. A tensor that will be provided using the infeed mechanism. Raises: TypeError: If 'dtype` is not a supported infeed type. """ if dtype not in _SUPPORTED_INFEED_DTYPES: raise TypeError( "{} is not a supported TPU infeed type. Supported types are: " "{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES))) return gen_tpu_ops.infeed_dequeue(dtype, shape, name=name) # pylint: disable=redefined-outer-name def infeed_dequeue_tuple(dtypes, shapes, name=None): """A placeholder op for values fed into the TPU simultaneously as a tuple. Args: dtypes: A list of `tf.DType`s that has length `>= 1`. The element types of each element in `outputs`. shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). The shapes of each tensor in `outputs`. name: A name for the operation (optional). Returns: A list of `Tensor` objects of type `dtypes`. A list of tensors that will be provided using the infeed mechanism. Raises: TypeError: If a type in 'dtypes` is not a supported infeed type. """ for dtype in dtypes: if dtype not in _SUPPORTED_INFEED_DTYPES: raise TypeError( "{} is not a supported TPU infeed type. Supported types are: " "{}".format(dtype, list(_SUPPORTED_INFEED_DTYPES))) return gen_tpu_ops.infeed_dequeue_tuple(dtypes, shapes, name=name) # pylint: enable=redefined-outer-name # pylint: disable=protected-access def send_tpu_embedding_gradients(inputs, config, learning_rates=None, name=None): """A placeholder op for feeding per-sample gradients to the embedding layer. Args: inputs: A TensorList of gradients with which to update embedding tables. This argument has the same length and shapes as the return value of RecvTPUEmbeddingActivations, but contains gradients of the model's loss with respect to the embedding activations. The embedding tables are updated from these gradients via the optimizers specified in the TPU embedding configuration given to tpu.initialize_system. config: Serialized TPUEmbeddingConfiguration proto. learning_rates: A TensorList of float32 scalars, one for each dynamic learning rate tag: see the comments in //third_party/tensorflow/core/protobuf/tpu/ optimization_parameters.proto. Multiple tables can share the same dynamic learning rate tag as specified in the configuration. If the learning rates for all tables are constant, this list should be empty. name: A name for the operation (optional). Returns: A SendTPUEmbeddingGradients operation. """ if learning_rates is None: learning_rates = [] return gen_tpu_ops.send_tpu_embedding_gradients( inputs=inputs, learning_rates=learning_rates, config=config, name=name) send_tpu_embedding_gradients.__doc__ = ( gen_tpu_ops.send_tpu_embedding_gradients.__doc__) # pylint: disable=protected-access def enqueue_tpu_embedding_integer_batch(batch, device_ordinal, mode_override=None, name=None): """A placeholder op for enqueueing embedding IDs to the TPU. Args: batch: A list of 1D tensors, one for each embedding table, containing the indices into the tables. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). 
name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingIntegerBatch operation. """ if mode_override is None: mode_override = "unspecified" return gen_tpu_ops.enqueue_tpu_embedding_integer_batch( batch=batch, device_ordinal=device_ordinal, mode_override=mode_override, name=name) enqueue_tpu_embedding_integer_batch.__doc__ = ( gen_tpu_ops.enqueue_tpu_embedding_integer_batch.__doc__) # pylint: disable=protected-access def enqueue_tpu_embedding_sparse_batch(sample_indices, embedding_indices, aggregation_weights, device_ordinal, combiners=None, mode_override=None, name=None): """A placeholder op for enqueueing embedding IDs to the TPU. Args: sample_indices: A list of rank 1 Tensors specifying the training example and feature to which the corresponding embedding_indices and aggregation_weights values belong. sample_indices[i] must equal b * nf + f, where nf is the number of features from the corresponding table, f is in [0, nf), and b is in [0, batch size). Both int32 and int64 are allowed, and will be converted to int32 internally. embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. Both int32 and int64 are allowed and will be converted to int32 internally. aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per (training example, feature) -- aggregation weights. Both float32 and float64 are allowed and will be converted to float32 internally. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. combiners: A list of string scalars, one for each embedding table that specify how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables (optional). mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingSparseBatch operation. """ if mode_override is None: mode_override = "unspecified" return gen_tpu_ops.enqueue_tpu_embedding_sparse_batch( sample_indices=sample_indices, embedding_indices=embedding_indices, aggregation_weights=aggregation_weights, device_ordinal=device_ordinal, combiners=combiners, mode_override=mode_override, name=name) enqueue_tpu_embedding_sparse_batch.__doc__ = ( gen_tpu_ops.enqueue_tpu_embedding_sparse_batch.__doc__) # pylint: disable=protected-access def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, table_ids, device_ordinal, max_sequence_lengths=None, combiners=None, mode_override=None, name=None): """A placeholder op for enqueueing embedding IDs to the TPU. Args: sample_indices: A list of rank 2 Tensors specifying the training example to which the corresponding embedding_indices and aggregation_weights values belong. It corresponds to sp_ids.indices in embedding_lookup_sparse(). If the size of its first dimension is 0, we assume each embedding_indices belongs to a different sample. Both int32 and int64 are allowed and will be converted to int32 internally. 
embedding_indices: A list of rank 1 Tensors, indices into the embedding tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both int32 and int64 are allowed and will be converted to int32 internally. aggregation_weights: A list of rank 1 Tensors containing per training example aggregation weights. It corresponds to sp_weights.values in embedding_lookup_sparse(). If the size of its first dimension is 0, we assume all weights are 1. Both float32 and float64 are allowed and will be converted to float32 internally. table_ids: A list of integers specifying the identifier of the embedding table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the corresponding input. The ith input is looked up using table_ids[i]. The size of the table_ids list must be equal to that of sample_indices, embedding_indices and aggregation_weights. device_ordinal: The TPU device to use. Should be >= 0 and less than the number of TPU cores in the task on which the node is placed. max_sequence_lengths: A list of integers, the size of which is equal to sample_indices. If equal to 0, the corresponding feature is considered to be a non-sequence feature, If greater than 0, the corresponding feature is a sequence feature with the given maximal length. If None, then we assume a list of all zeroes. combiners: A list of string scalars, one for each embedding table that specify how to normalize the embedding activations after weighted summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have the sum of the weights be 0 for 'mean' or the sum of the squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for all tables (optional). mode_override: A string input that overrides the mode specified in the TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference', 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override is used (optional). name: A name for the operation (optional). Returns: An EnqueueTPUEmbeddingSparseTensorBatch operation. """ if mode_override is None: mode_override = "unspecified" return gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch( sample_indices=sample_indices, embedding_indices=embedding_indices, aggregation_weights=aggregation_weights, table_ids=table_ids, device_ordinal=device_ordinal, max_sequence_lengths=max_sequence_lengths, combiners=combiners, mode_override=mode_override, name=name) enqueue_tpu_embedding_sparse_tensor_batch.__doc__ = ( gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch.__doc__)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/ops/tpu_ops.py
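Not part of the dataset row above: a minimal sketch of calling cross_replica_sum from tpu_ops.py inside a replicated TPU computation. The replica count and group assignment are assumptions; outside a TPU shard context the op falls back to a single-shard group, as the module's warning in _create_default_group_assignment notes.

# Hedged sketch: an all-reduce over two replicas with an explicit
# group_assignment. Replica count and tensor shape are assumptions.
from tensorflow.python.tpu.ops import tpu_ops


def tower_fn(per_replica_value):
  # One group containing replicas 0 and 1; both replicas receive the sum.
  group_assignment = [[0, 1]]
  return tpu_ops.cross_replica_sum(per_replica_value, group_assignment)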
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Operations to select TPU core to run."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/ops/tpu_ordinal_selector_op.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler version information."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Cloud TPU profiler uses semantic versioning, see http://semver.org/.
# A version string consists of
# major_version.minor_version.patch_version-build_metadata.
__version__ = "1.14.1-a0"
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/profiler/version.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! # # Do not use pylint on generated code. # pylint: disable=missing-docstring,g-short-docstring-punctuation,g-no-space-after-docstring-summary,invalid-name,line-too-long,unused-argument,g-doc-args from __future__ import absolute_import from __future__ import division from __future__ import print_function import grpc from tensorflow.core.profiler import profiler_analysis_pb2 as third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 class ProfileAnalysisStub(object): """////////////////////////////////////////////////////////////////////////////// ProfileAnalysis service provide entry point for profiling TPU and for serving profiled data to Tensorboard through GRPC ////////////////////////////////////////////////////////////////////////////// """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.NewSession = channel.unary_unary( '/tensorflow.ProfileAnalysis/NewSession', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .NewProfileSessionRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .NewProfileSessionResponse.FromString, ) self.EnumSessions = channel.unary_unary( '/tensorflow.ProfileAnalysis/EnumSessions', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .EnumProfileSessionsAndToolsRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .EnumProfileSessionsAndToolsResponse.FromString, ) self.GetSessionToolData = channel.unary_unary( '/tensorflow.ProfileAnalysis/GetSessionToolData', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .ProfileSessionDataRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .ProfileSessionDataResponse.FromString, ) class ProfileAnalysisServicer(object): """////////////////////////////////////////////////////////////////////////////// ProfileAnalysis service provide entry point for profiling TPU and for serving profiled data to Tensorboard through GRPC ////////////////////////////////////////////////////////////////////////////// """ def NewSession(self, request, context): """Starts a profiling session, blocks until it completes. TPUProfileAnalysis service delegate this to TPUProfiler service. Populate the profiled data in repository, then return status to caller. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def EnumSessions(self, request, context): """Enumerate existing sessions and return available profile tools.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSessionToolData(self, request, context): """Retrieve specific tool's data for specific session.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ProfileAnalysisServicer_to_server(servicer, server): rpc_method_handlers = { 'NewSession': grpc.unary_unary_rpc_method_handler( servicer.NewSession, request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .NewProfileSessionRequest.FromString, response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .NewProfileSessionResponse.SerializeToString, ), 'EnumSessions': grpc.unary_unary_rpc_method_handler( servicer.EnumSessions, request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .EnumProfileSessionsAndToolsRequest.FromString, response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .EnumProfileSessionsAndToolsResponse.SerializeToString, ), 'GetSessionToolData': grpc.unary_unary_rpc_method_handler( servicer.GetSessionToolData, request_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .ProfileSessionDataRequest.FromString, response_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2 .ProfileSessionDataResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'tensorflow.ProfileAnalysis', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/profiler/profiler_analysis_pb2_grpc.py
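Not part of the dataset row above: a client-side sketch that wires the generated ProfileAnalysisStub to a gRPC channel. The address is a placeholder and no live service is assumed; the request type matches the serializer the stub registers for EnumSessions.

# Hedged sketch: creating the stub and issuing an EnumSessions RPC.
# 'localhost:6009' is a placeholder address, not taken from the source.
import grpc

from tensorflow.core.profiler import profiler_analysis_pb2
from tensorflow.python.tpu.profiler import profiler_analysis_pb2_grpc

channel = grpc.insecure_channel('localhost:6009')
stub = profiler_analysis_pb2_grpc.ProfileAnalysisStub(channel)
request = profiler_analysis_pb2.EnumProfileSessionsAndToolsRequest()
sessions = stub.EnumSessions(request)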
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Classes for TPU trace events."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# pylint: disable=wildcard-import,unused-import
from tensorflow.core.protobuf.trace_events_pb2 import *
from tensorflow.core.profiler.profiler_analysis_pb2 import *
# pylint: enable=wildcard-import,unused-import

from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = ['Trace', 'Resource', 'Device', 'TraceEvent']

remove_undocumented(__name__, _allowed_symbols)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/profiler/__init__.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Cloud TPU profiler client.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys from absl import app from absl import flags from distutils.version import LooseVersion from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver as resolver from tensorflow.python.eager import profiler_client from tensorflow.python.framework import errors from tensorflow.python.framework import versions from tensorflow.python.platform import gfile from tensorflow.python.platform import tf_logging as logging from tensorflow.python.tpu.profiler import version as profiler_version FLAGS = flags.FLAGS # Cloud TPU Cluster Resolvers flags.DEFINE_string( 'gcp_project', None, 'Project name for the Cloud TPU-enabled project. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_zone', None, help='GCE zone where the Cloud TPU is located in. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu', None, 'Name of the Cloud TPU for Cluster Resolvers. You must ' 'specify either this flag or --service_addr.') # Tool specific parameters flags.DEFINE_string( 'service_addr', None, 'Address of TPU profiler service e.g. ' 'localhost:8466, you must specify either this flag or --tpu.') flags.DEFINE_string( 'workers_list', None, 'The list of worker TPUs that we are about to profile' ' e.g. 10.0.1.2, 10.0.1.3. You can specify this flag with --tpu or ' '--service_addr to profile a subset of tpu nodes. You can also use only' '--tpu and leave this flag unspecified to profile all the tpus.') flags.DEFINE_string( 'logdir', None, 'Path of TensorBoard log directory e.g. /tmp/tb_log, ' 'gs://tb_bucket') flags.DEFINE_integer('duration_ms', 0, 'Duration of tracing or monitoring in ms.') flags.DEFINE_integer( 'num_tracing_attempts', 3, 'Automatically retry N times when no trace ' 'event is collected.') flags.DEFINE_boolean('include_dataset_ops', True, 'Set to false to profile longer TPU ' 'device traces.') # Monitoring parameters flags.DEFINE_integer( 'monitoring_level', 0, 'Choose a monitoring level between ' '1 and 2 to monitor your TPU job continuously. Level 2 is more verbose than' ' level 1 and shows more metrics.') flags.DEFINE_integer( 'num_queries', 100, 'This script will run monitoring for num_queries before it stops.') flags.DEFINE_boolean('display_timestamp', False, 'Set to true to display timestamp in monitoring results.') def get_workers_list(cluster_resolver): """Returns a comma separated list of TPU worker IP addresses. Gets cluster_spec from cluster_resolver. Use the worker's task indices to obtain and return a list of ip addresses. Args: cluster_resolver: TensorFlow TPUClusterResolver instance. 
Returns: A string of comma separated list of IP addresses. For example: '10.2.0.1,10.2.0.2,10.2.0.3,10.2.0.4' Raises: UnavailableError: cluster_resolver doesn't contain a valid cluster_spec. """ worker_job_name = 'worker' cluster_spec = cluster_resolver.cluster_spec() if not cluster_spec: raise errors.UnavailableError( 'None', 'None', 'Cluster spec not found, your client must run in GCE environment.') task_indices = cluster_spec.task_indices(worker_job_name) workers_list = [ cluster_spec.task_address(worker_job_name, i).split(':')[0] for i in task_indices ] return ','.join(workers_list) def monitoring_helper(service_addr, duration_ms, monitoring_level, display_timestamp, num_queries): """Helper function to print monitoring results. Helper function to print monitoring results for num_queries times. Args: service_addr: Address of the TPU profiler service. duration_ms: Duration of one monitoring sample in milliseconds. monitoring_level: An integer between 1 and 2. Level 2 is more verbose than level 1 and shows more metrics. display_timestamp: Set to true to display timestamp in monitoring. num_queries: Number of monitoring samples to collect. """ if monitoring_level <= 0 or monitoring_level > 2: sys.exit('Please choose a monitoring level between 1 and 2.') for query in range(0, num_queries): res = profiler_client.monitor(service_addr, duration_ms, monitoring_level, display_timestamp) print('Cloud TPU Monitoring Results (Sample ', query, '):\n\n', res) def run_main(): app.run(main) def main(unused_argv=None): logging.set_verbosity(logging.INFO) tf_version = versions.__version__ print('TensorFlow version %s detected' % tf_version) print('Welcome to the Cloud TPU Profiler v%s' % profiler_version.__version__) if LooseVersion(tf_version) < LooseVersion('1.14.0'): sys.exit('You must install tensorflow >= 1.14.0 to use this plugin.') if not FLAGS.service_addr and not FLAGS.tpu: sys.exit('You must specify either --service_addr or --tpu.') tpu_cluster_resolver = None if FLAGS.service_addr: if FLAGS.tpu: logging.warn('Both --service_addr and --tpu are set. Ignoring ' '--tpu and using --service_addr.') service_addr = FLAGS.service_addr else: try: tpu_cluster_resolver = ( resolver.TPUClusterResolver([FLAGS.tpu], zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)) service_addr = tpu_cluster_resolver.get_master() except (ValueError, TypeError): sys.exit('Failed to find TPU %s in zone %s project %s. You may use ' '--tpu_zone and --gcp_project to specify the zone and project of' ' your TPU.' % (FLAGS.tpu, FLAGS.tpu_zone, FLAGS.gcp_project)) service_addr = service_addr.replace('grpc://', '').replace(':8470', ':8466') workers_list = '' if FLAGS.workers_list is not None: workers_list = FLAGS.workers_list elif tpu_cluster_resolver is not None: workers_list = get_workers_list(tpu_cluster_resolver) # If profiling duration was not set by user or set to a non-positive value, # we set it to a default value of 1000ms. 
duration_ms = FLAGS.duration_ms if FLAGS.duration_ms > 0 else 1000 if FLAGS.monitoring_level > 0: print('Since monitoring level is provided, profile', service_addr, ' for ', FLAGS.duration_ms, ' ms and show metrics for ', FLAGS.num_queries, ' time(s).') monitoring_helper(service_addr, duration_ms, FLAGS.monitoring_level, FLAGS.display_timestamp, FLAGS.num_queries) else: if not FLAGS.logdir: sys.exit('You must specify either --logdir or --monitoring_level.') if not gfile.Exists(FLAGS.logdir): gfile.MakeDirs(FLAGS.logdir) try: profiler_client.start_tracing(service_addr, os.path.expanduser(FLAGS.logdir), duration_ms, workers_list, FLAGS.include_dataset_ops, FLAGS.num_tracing_attempts) except errors.UnavailableError: sys.exit(0) if __name__ == '__main__': run_main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/profiler/capture_tpu_profile.py
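Not part of the dataset row above: a condensed sketch of the tracing path capture_tpu_profile.py takes once its flags are resolved. The service address and log directory are placeholders; the positional argument order mirrors the start_tracing call in the script, and 1000 ms is the script's fallback duration.

# Hedged sketch mirroring the profiler_client.start_tracing call above.
# service_addr and logdir are placeholders, not values from the source.
from tensorflow.python.eager import profiler_client

service_addr = '10.0.0.2:8466'  # placeholder TPU profiler endpoint
logdir = '/tmp/tb_log'          # placeholder TensorBoard log directory
duration_ms = 1000              # the script's default when --duration_ms <= 0
workers_list = ''               # empty: profile only service_addr
profiler_client.start_tracing(service_addr, logdir, duration_ms,
                              workers_list, True, 3)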
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler package."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from setuptools import setup

from cloud_tpu_profiler.version import __version__

CONSOLE_SCRIPTS = [
    'capture_tpu_profile=cloud_tpu_profiler.capture_tpu_profile:run_main',
]

setup(
    name='cloud_tpu_profiler',
    version=__version__.replace('-', ''),
    description='Trace and profile Cloud TPU performance',
    long_description='Tools for capturing TPU profiles',
    url='https://www.tensorflow.org/tfrc/',
    author='Google Inc.',
    author_email='packages@tensorflow.org',
    packages=['cloud_tpu_profiler'],
    entry_points={
        'console_scripts': CONSOLE_SCRIPTS,
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    license='Apache 2.0',
    keywords='tensorflow performance tpu',
)
tensorflow-r1.15.5-nv23.03
tensorflow/python/tpu/profiler/pip_package/setup.py
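The console_scripts entry in the setup.py above means that, once the package is installed, pip generates a capture_tpu_profile command that dispatches to run_main(). A minimal sketch of invoking that same entry point programmatically, assuming the cloud_tpu_profiler package built from this setup.py is installed:

# Hedged sketch: calls the function named in CONSOLE_SCRIPTS directly;
# equivalent to running `capture_tpu_profile ...` from a shell. Assumes the
# package defined by the setup.py above is installed.
from cloud_tpu_profiler.capture_tpu_profile import run_main

if __name__ == '__main__':
  run_main()  # parses the profiler flags and dispatches to main()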
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """`tf.data.Dataset` API for input pipelines. See [Importing Data](https://tensorflow.org/guide/datasets) for an overview. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.data import experimental from tensorflow.python.data.ops.dataset_ops import Dataset from tensorflow.python.data.ops.dataset_ops import make_initializable_iterator from tensorflow.python.data.ops.dataset_ops import make_one_shot_iterator from tensorflow.python.data.ops.iterator_ops import Iterator from tensorflow.python.data.ops.readers import FixedLengthRecordDataset from tensorflow.python.data.ops.readers import TextLineDataset from tensorflow.python.data.ops.readers import TFRecordDataset # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/__init__.py
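The module above only re-exports the core tf.data symbols (Dataset, Iterator, the reader datasets, and the iterator helpers). A minimal, hedged sketch of the exported API in use, assuming TensorFlow 1.15 with eager execution enabled:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Build a small input pipeline with the re-exported Dataset class.
dataset = tf.data.Dataset.range(5).map(lambda x: x * 2).batch(2)
for batch in dataset:
  print(batch.numpy())  # [0 2], then [4 6], then [8]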
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Experimental API for building input pipelines. This module contains experimental `Dataset` sources and transformations that can be used in conjunction with the `tf.data.Dataset` API. Note that the `tf.data.experimental` API is not subject to the same backwards compatibility guarantees as `tf.data`, but we will provide deprecation advice in advance of removing existing functionality. See [Importing Data](https://tensorflow.org/guide/datasets) for an overview. @@Counter @@CheckpointInputPipelineHook @@CsvDataset @@DatasetStructure @@DistributeOptions @@MapVectorizationOptions @@OptimizationOptions @@Optional @@OptionalStructure @@RaggedTensorStructure @@RandomDataset @@Reducer @@SparseTensorStructure @@SqlDataset @@StatsAggregator @@StatsOptions @@Structure @@TFRecordWriter @@TensorArrayStructure @@TensorStructure @@ThreadingOptions @@bucket_by_sequence_length @@bytes_produced_stats @@cardinality @@choose_from_datasets @@copy_to_device @@dense_to_sparse_batch @@enumerate_dataset @@from_variant @@get_next_as_optional @@get_single_element @@get_structure @@group_by_reducer @@group_by_window @@ignore_errors @@latency_stats @@make_batched_features_dataset @@make_csv_dataset @@make_saveable_from_iterator @@map_and_batch @@map_and_batch_with_legacy_function @@parallel_interleave @@parse_example_dataset @@prefetch_to_device @@rejection_resample @@sample_from_datasets @@scan @@shuffle_and_repeat @@take_while @@to_variant @@unbatch @@unique @@AUTOTUNE @@INFINITE_CARDINALITY @@UNKNOWN_CARDINALITY """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch from tensorflow.python.data.experimental.ops.batching import map_and_batch from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function from tensorflow.python.data.experimental.ops.batching import unbatch from tensorflow.python.data.experimental.ops.cardinality import cardinality from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY from tensorflow.python.data.experimental.ops.counter import Counter from tensorflow.python.data.experimental.ops.distribute_options import DistributeOptions from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset from tensorflow.python.data.experimental.ops.error_ops import ignore_errors from tensorflow.python.data.experimental.ops.get_single_element import get_single_element from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length from tensorflow.python.data.experimental.ops.grouping import group_by_reducer from 
tensorflow.python.data.experimental.ops.grouping import group_by_window from tensorflow.python.data.experimental.ops.grouping import Reducer from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator from tensorflow.python.data.experimental.ops.optimization_options import MapVectorizationOptions from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device from tensorflow.python.data.experimental.ops.random_ops import RandomDataset from tensorflow.python.data.experimental.ops.readers import CsvDataset from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset from tensorflow.python.data.experimental.ops.readers import make_csv_dataset from tensorflow.python.data.experimental.ops.readers import SqlDataset from tensorflow.python.data.experimental.ops.resampling import rejection_resample from tensorflow.python.data.experimental.ops.scan_ops import scan from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats from tensorflow.python.data.experimental.ops.stats_ops import latency_stats from tensorflow.python.data.experimental.ops.stats_options import StatsOptions from tensorflow.python.data.experimental.ops.take_while_ops import take_while from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions from tensorflow.python.data.experimental.ops.unique import unique from tensorflow.python.data.experimental.ops.writers import TFRecordWriter from tensorflow.python.data.ops.dataset_ops import AUTOTUNE from tensorflow.python.data.ops.dataset_ops import DatasetSpec as DatasetStructure from tensorflow.python.data.ops.dataset_ops import from_variant from tensorflow.python.data.ops.dataset_ops import get_structure from tensorflow.python.data.ops.dataset_ops import to_variant from tensorflow.python.data.ops.iterator_ops import get_next_as_optional from tensorflow.python.data.ops.optional_ops import Optional from tensorflow.python.data.ops.optional_ops import OptionalSpec as OptionalStructure from tensorflow.python.data.util.structure import _RaggedTensorStructure as RaggedTensorStructure from tensorflow.python.data.util.structure import _SparseTensorStructure as SparseTensorStructure from tensorflow.python.data.util.structure import _TensorArrayStructure as TensorArrayStructure from tensorflow.python.data.util.structure import _TensorStructure as TensorStructure from tensorflow.python.framework.type_spec import TypeSpec as Structure # pylint: enable=unused-import from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/__init__.py
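The docstring above enumerates the experimental symbols this module exports. A short hedged sketch exercising a few of them (cardinality, Counter, INFINITE_CARDINALITY), again assuming TF 1.15 with eager execution enabled:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# A finite dataset reports its exact cardinality.
dataset = tf.data.Dataset.range(100)
print(int(tf.data.experimental.cardinality(dataset)))  # 100

# Counter never terminates, so its cardinality is the infinite sentinel.
counter = tf.data.experimental.Counter(start=0, step=2)
print(int(tf.data.experimental.cardinality(counter)) ==
      tf.data.experimental.INFINITE_CARDINALITY)       # True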
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Graph mode cluster tests for the experimental `replicate` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.compat import compat from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import combinations from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test class ReplicateClusterTest(test_base.DatasetTestBase, parameterized.TestCase): def setUp(self): super(ReplicateClusterTest, self).setUp() # Start the local server. worker_config = config_pb2.ConfigProto() worker_config.device_count["CPU"] = 2 worker, _ = test_util.create_local_cluster( 3, 0, worker_config=worker_config) self._device0 = "/job:worker/replica:0/task:0/device:CPU:0" self._device1 = "/job:worker/replica:0/task:1/device:CPU:0" self._device2 = "/job:worker/replica:0/task:2/device:CPU:0" self._target = worker[0].target @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testBasic(self): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): get_next = self.getNext(dataset0) with ops.device(self._device1): get_next1 = self.getNext(dataset1) with ops.device(self._device2): get_next2 = self.getNext(dataset2) with session.Session(self._target) as sess: for i in range(100): self.assertEqual(i, sess.run(get_next())) self.assertEqual(i, sess.run(get_next1())) self.assertEqual(i, sess.run(get_next2())) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testMap(self): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): get_next = self.getNext(dataset0) with ops.device(self._device1): get_next1 = self.getNext(dataset1) with ops.device(self._device2): get_next2 = self.getNext(dataset2) with session.Session(self._target) as sess: for i in range(100): self.assertEqual(i * 2, sess.run(get_next())) self.assertEqual(i * 2, 
sess.run(get_next1())) self.assertEqual(i * 2, sess.run(get_next2())) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testVariableInput(self): with ops.device(self._device0): counter_var = variable_scope.get_variable( "counter", (), dtypes.int32, use_resource=True) dataset0 = dataset_ops.Dataset.range(100).map( lambda _: counter_var.assign_add(1)) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] with ops.device(self._device1): it1 = dataset_ops.make_initializable_iterator(dataset1) # We don't support stateful ops in functions as of now. with session.Session(self._target) as sess: with self.assertRaises(errors.FailedPreconditionError): sess.run(it1.initializer) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph"])) def testWhitelistStatefulOp(self): with compat.forward_compatibility_horizon(2019, 9, 12): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100).map( lambda _: random_ops.random_uniform( # pylint:disable=g-long-lambda [], minval=1, maxval=10, dtype=dtypes.float32)) opt = dataset_ops.Options() opt.experimental_stateful_whitelist = ["RandomUniform"] dataset0 = dataset0.with_options(opt) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): get_next0 = self.getNext(dataset0) with ops.device(self._device1): get_next1 = self.getNext(dataset1) with ops.device(self._device2): get_next2 = self.getNext(dataset2) with session.Session(self._target) as sess: for _ in range(100): sess.run(get_next0()) sess.run(get_next1()) sess.run(get_next2()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/replicate_cluster_test.py
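The tests above exercise the private distribute.replicate transformation against a three-task local cluster. Below is a graph-construction-only sketch of the call pattern; the device strings are illustrative placeholders, and it makes no claim to cover the cluster setup the tests rely on.

# Hedged sketch: builds the replicated datasets but never runs them, so no
# multi-worker cluster is required. Assumes the TF 1.15 internal module paths.
import tensorflow as tf
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops

with tf.Graph().as_default():
  dataset0 = dataset_ops.Dataset.range(100)
  devices = ["/job:worker/replica:0/task:1/device:CPU:0",
             "/job:worker/replica:0/task:2/device:CPU:0"]
  # `replicate` returns a dict mapping each requested device to a dataset
  # that produces the same elements as `dataset0` on that device.
  replicated = distribute.replicate(dataset0, devices)
  print(sorted(replicated.keys()) == sorted(devices))  # True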
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.take_while()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import take_while_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class TakeWhileTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.parameters((14, 2), (15, 2), (100, 3)) def testTakeWhileDataset(self, num_elements, window_size): def _predicate_func(elem): return array_ops.shape(elem)[0] > (window_size - 1) take_while = take_while_ops.take_while(_predicate_func) dataset = dataset_ops.Dataset.range(num_elements).batch(window_size) dataset = dataset.apply(take_while).flat_map( dataset_ops.Dataset.from_tensor_slices) expected_num_elements = int(num_elements / window_size) * window_size self.assertDatasetProduces(dataset, np.arange(expected_num_elements)) @parameterized.parameters((10, 2, False), (16, 7, False), (100, 99, False), (100, 101, True), (0, 1, True)) def testTakeWhileDatasetRange(self, num_elements, upper_bound, out_of_bounds): dataset = dataset_ops.Dataset.range(num_elements).apply( take_while_ops.take_while(lambda x: x < upper_bound)) if out_of_bounds: with self.assertRaises(errors.OutOfRangeError): self.assertDatasetProduces(dataset, np.arange(upper_bound)) else: self.assertDatasetProduces(dataset, np.arange(upper_bound)) def testTakeWhileDatasetString(self): def not_equal(string): return lambda x: math_ops.not_equal(x, constant_op.constant(string)) string = ["this", "is", "the", "test", "for", "strings"] dataset = dataset_ops.Dataset.from_tensor_slices(string).apply( take_while_ops.take_while(not_equal("test"))) next_element = self.getNext(dataset) self.assertEqual(b"this", self.evaluate(next_element())) self.assertEqual(b"is", self.evaluate(next_element())) self.assertEqual(b"the", self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.assertEqual(b"test", self.evaluate(next_element())) @parameterized.parameters((5, 3), (10, 0), (100, 5), (8, 7)) def testTakewhileDatasetShortCircuit(self, size, index): def _predicate_func(data_elem): return data_elem boolean_array = [True] * size boolean_array[index] = False dataset = dataset_ops.Dataset.from_tensor_slices(boolean_array).apply( take_while_ops.take_while(_predicate_func)) next_element = self.getNext(dataset) for _ in range(index): 
      self.assertTrue(self.evaluate(next_element()))

    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element())

  def testTakeWhileDatasetWithRepeat(self):
    dataset = dataset_ops.Dataset.range(10).apply(
        take_while_ops.take_while(lambda x: x < 2)).repeat(5)
    self.assertDatasetProduces(dataset, np.tile([0, 1], 5))


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/take_while_test.py
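For reference, a hedged usage sketch of the public take_while transformation the tests above cover, assuming TF 1.15 with eager execution enabled:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Keep consuming elements only while the predicate holds.
dataset = tf.data.Dataset.range(10).apply(
    tf.data.experimental.take_while(lambda x: x < 5))
print([int(x) for x in dataset])  # [0, 1, 2, 3, 4]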
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.shuffle_and_repeat()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.ops import shuffle_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ShuffleAndRepeatTest(test_base.DatasetTestBase): def _build_ds(self, seed, count=5, num_elements=20): return dataset_ops.Dataset.range(num_elements).apply( shuffle_ops.shuffle_and_repeat(buffer_size=5, count=count, seed=seed)) def _gen_outputs(self, ds_fn, num_outputs, verify_exhausted=True): get_next = self.getNext(ds_fn()) outputs = [] for _ in range(num_outputs): outputs.append(self.evaluate(get_next())) if verify_exhausted: with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) return outputs def testCorrectOutput(self): output = self._gen_outputs(lambda: self._build_ds(10), 100) self.assertSequenceEqual( sorted(output), sorted( np.array([range(20) for _ in range(5)]).flatten())) for i in range(5): self.assertSequenceEqual(sorted(output[i * 20:(i + 1) * 20]), range(20)) def testReshuffling(self): # Check that the output orders of different epochs are indeed different. output = self._gen_outputs(lambda: self._build_ds(10), 100) for i in range(4): epoch1 = output[i * 20:(i + 1) * 20] epoch2 = output[(i + 1) * 20:(i + 2) * 20] self.assertNotEqual(epoch1, epoch2) def testSameOrderForSameSeeds(self): output1 = self._gen_outputs(lambda: self._build_ds(10), 100) output2 = self._gen_outputs(lambda: self._build_ds(10), 100) self.assertEqual(output1, output2) def testDifferentOrderForDifferentSeeds(self): output1 = self._gen_outputs(lambda: self._build_ds(10), 100) output2 = self._gen_outputs(lambda: self._build_ds(20), 100) self.assertNotEqual(output1, output2) self.assertEqual(sorted(output1), sorted(output2)) def testCountNone(self): output1 = self._gen_outputs( lambda: self._build_ds(10, count=None), 100, verify_exhausted=False) output2 = self._gen_outputs( lambda: self._build_ds(20, count=None), 100, verify_exhausted=False) self.assertNotEqual(output1, output2) self.assertEqual(sorted(output1), sorted(output2)) def testCountMinusOne(self): output1 = self._gen_outputs( lambda: self._build_ds(10, count=-1), 100, verify_exhausted=False) output2 = self._gen_outputs( lambda: self._build_ds(20, count=-1), 100, verify_exhausted=False) self.assertNotEqual(output1, output2) self.assertEqual(sorted(output1), sorted(output2)) def testInfiniteOutputs(self): # Asserting the iterator is exhausted after producing 100 items should fail. 
with self.assertRaises(AssertionError): self._gen_outputs(lambda: self._build_ds(10, count=None), 100) with self.assertRaises(AssertionError): self._gen_outputs(lambda: self._build_ds(10, count=-1), 100) def testInfiniteEmpty(self): with self.assertRaises(errors.OutOfRangeError): self._gen_outputs(lambda: self._build_ds(10, count=None, num_elements=0), 100) with self.assertRaises(errors.OutOfRangeError): self._gen_outputs(lambda: self._build_ds(10, count=-1, num_elements=0), 100) def testLargeBufferSize(self): ds = dataset_ops.Dataset.range(20).apply( shuffle_ops.shuffle_and_repeat(buffer_size=21)) get_next = self.getNext(ds) self.evaluate(get_next()) def testVeryLargeBufferSize(self): num_epochs = 1000 * 1000 # Each element being shuffled and repeated has shape (100,). This will OOM # or timeout if we actually load everything into the buffer. ds = dataset_ops.Dataset.range(500).batch(100).apply( shuffle_ops.shuffle_and_repeat( buffer_size=5 * num_epochs, count=num_epochs)) # Verify two epochs worth of output. output = self._gen_outputs(lambda: ds, 2 * 5, verify_exhausted=False) for i in range(2): sorted_epoch = sorted( output[i * 5:(i + 1) * 5], key=lambda batch: batch[0]) self.assertAllEqual(sorted_epoch, np.arange(500).reshape([5, 100])) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/shuffle_and_repeat_test.py
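A hedged usage sketch of shuffle_and_repeat matching the _build_ds helper above (buffer of 5, five epochs over 20 elements), assuming TF 1.15 with eager execution enabled:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

dataset = tf.data.Dataset.range(20).apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=5, count=5, seed=10))
values = [int(x) for x in dataset]
print(len(values))                              # 100: five epochs of 20
print(sorted(values[:20]) == list(range(20)))   # each epoch is a permutation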
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.group_by_window()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test # NOTE(mrry): These tests are based on the tests in bucket_ops_test.py. # Currently, they use a constant batch size, though should be made to use a # different batch size per key. @test_util.run_all_in_graph_and_eager_modes class GroupByWindowTest(test_base.DatasetTestBase): def _dynamicPad(self, bucket, window, window_size): # TODO(mrry): To match `tf.contrib.training.bucket()`, implement a # generic form of padded_batch that pads every component # dynamically and does not rely on static shape information about # the arguments. 
return dataset_ops.Dataset.zip( (dataset_ops.Dataset.from_tensors(bucket), window.padded_batch( 32, (tensor_shape.TensorShape([]), tensor_shape.TensorShape( [None]), tensor_shape.TensorShape([3]))))) def testSingleBucket(self): def _map_fn(v): return (v, array_ops.fill([v], v), array_ops.fill([3], string_ops.as_string(v))) input_dataset = dataset_ops.Dataset.from_tensor_slices( math_ops.range(32)).map(_map_fn) bucketed_dataset = input_dataset.apply( grouping.group_by_window( lambda x, y, z: 0, lambda k, bucket: self._dynamicPad(k, bucket, 32), 32)) get_next = self.getNext(bucketed_dataset) which_bucket, bucketed_values = self.evaluate(get_next()) self.assertEqual(0, which_bucket) expected_scalar_int = np.arange(32, dtype=np.int64) expected_unk_int64 = np.zeros((32, 31)).astype(np.int64) for i in range(32): expected_unk_int64[i, :i] = i expected_vec3_str = np.vstack(3 * [np.arange(32).astype(bytes)]).T self.assertAllEqual(expected_scalar_int, bucketed_values[0]) self.assertAllEqual(expected_unk_int64, bucketed_values[1]) self.assertAllEqual(expected_vec3_str, bucketed_values[2]) def testEvenOddBuckets(self): def _map_fn(v): return (v, array_ops.fill([v], v), array_ops.fill([3], string_ops.as_string(v))) input_dataset = dataset_ops.Dataset.from_tensor_slices( math_ops.range(64)).map(_map_fn) bucketed_dataset = input_dataset.apply( grouping.group_by_window( lambda x, y, z: math_ops.cast(x % 2, dtypes.int64), lambda k, bucket: self._dynamicPad(k, bucket, 32), 32)) get_next = self.getNext(bucketed_dataset) # Get two minibatches (one containing even values, one containing odds) which_bucket_even, bucketed_values_even = self.evaluate(get_next()) which_bucket_odd, bucketed_values_odd = self.evaluate(get_next()) # Count number of bucket_tensors. self.assertEqual(3, len(bucketed_values_even)) self.assertEqual(3, len(bucketed_values_odd)) # Ensure bucket 0 was used for all minibatch entries. 
self.assertAllEqual(0, which_bucket_even) self.assertAllEqual(1, which_bucket_odd) # Test the first bucket outputted, the events starting at 0 expected_scalar_int = np.arange(0, 32 * 2, 2, dtype=np.int64) expected_unk_int64 = np.zeros((32, 31 * 2)).astype(np.int64) for i in range(0, 32): expected_unk_int64[i, :2 * i] = 2 * i expected_vec3_str = np.vstack( 3 * [np.arange(0, 32 * 2, 2).astype(bytes)]).T self.assertAllEqual(expected_scalar_int, bucketed_values_even[0]) self.assertAllEqual(expected_unk_int64, bucketed_values_even[1]) self.assertAllEqual(expected_vec3_str, bucketed_values_even[2]) # Test the second bucket outputted, the odds starting at 1 expected_scalar_int = np.arange(1, 32 * 2 + 1, 2, dtype=np.int64) expected_unk_int64 = np.zeros((32, 31 * 2 + 1)).astype(np.int64) for i in range(0, 32): expected_unk_int64[i, :2 * i + 1] = 2 * i + 1 expected_vec3_str = np.vstack( 3 * [np.arange(1, 32 * 2 + 1, 2).astype(bytes)]).T self.assertAllEqual(expected_scalar_int, bucketed_values_odd[0]) self.assertAllEqual(expected_unk_int64, bucketed_values_odd[1]) self.assertAllEqual(expected_vec3_str, bucketed_values_odd[2]) def testEvenOddBucketsFilterOutAllOdd(self): def _map_fn(v): return { "x": v, "y": array_ops.fill([v], v), "z": array_ops.fill([3], string_ops.as_string(v)) } def _dynamic_pad_fn(bucket, window, _): return dataset_ops.Dataset.zip( (dataset_ops.Dataset.from_tensors(bucket), window.padded_batch( 32, { "x": tensor_shape.TensorShape([]), "y": tensor_shape.TensorShape([None]), "z": tensor_shape.TensorShape([3]) }))) input_dataset = dataset_ops.Dataset.from_tensor_slices(math_ops.range( 128)).map(_map_fn).filter(lambda d: math_ops.equal(d["x"] % 2, 0)) bucketed_dataset = input_dataset.apply( grouping.group_by_window( lambda d: math_ops.cast(d["x"] % 2, dtypes.int64), lambda k, bucket: _dynamic_pad_fn(k, bucket, 32), 32)) get_next = self.getNext(bucketed_dataset) # Get two minibatches ([0, 2, ...] 
and [64, 66, ...]) which_bucket0, bucketed_values_even0 = self.evaluate(get_next()) which_bucket1, bucketed_values_even1 = self.evaluate(get_next()) # Ensure that bucket 1 was completely filtered out self.assertAllEqual(0, which_bucket0) self.assertAllEqual(0, which_bucket1) self.assertAllEqual( np.arange(0, 64, 2, dtype=np.int64), bucketed_values_even0["x"]) self.assertAllEqual( np.arange(64, 128, 2, dtype=np.int64), bucketed_values_even1["x"]) def testDynamicWindowSize(self): components = np.arange(100).astype(np.int64) # Key fn: even/odd # Reduce fn: batches of 5 # Window size fn: even=5, odd=10 def window_size_func(key): window_sizes = constant_op.constant([5, 10], dtype=dtypes.int64) return window_sizes[key] dataset = dataset_ops.Dataset.from_tensor_slices(components).apply( grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(20), None, window_size_func)) get_next = self.getNext(dataset) with self.assertRaises(errors.OutOfRangeError): batches = 0 while True: result = self.evaluate(get_next()) is_even = all(x % 2 == 0 for x in result) is_odd = all(x % 2 == 1 for x in result) self.assertTrue(is_even or is_odd) expected_batch_size = 5 if is_even else 10 self.assertEqual(expected_batch_size, result.shape[0]) batches += 1 self.assertEqual(batches, 15) def testSimple(self): components = np.random.randint(100, size=(200,)).astype(np.int64) dataset = dataset_ops.Dataset.from_tensor_slices( components).map(lambda x: x * x).apply( grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4)) get_next = self.getNext(dataset) counts = [] with self.assertRaises(errors.OutOfRangeError): while True: result = self.evaluate(get_next()) self.assertTrue( all(x % 2 == 0 for x in result) or all(x % 2 == 1) for x in result) counts.append(result.shape[0]) self.assertEqual(len(components), sum(counts)) num_full_batches = len([c for c in counts if c == 4]) self.assertGreaterEqual(num_full_batches, 24) self.assertTrue(all(c == 4 for c in counts[:num_full_batches])) def testImmediateOutput(self): components = np.array( [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64) dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat( -1).apply( grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4), 4)) get_next = self.getNext(dataset) # The input is infinite, so this test demonstrates that: # 1. We produce output without having to consume the entire input, # 2. Different buckets can produce output at different rates, and # 3. For deterministic input, the output is deterministic. for _ in range(3): self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next())) self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next())) self.assertAllEqual([2, 2, 2, 2], self.evaluate(get_next())) self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next())) def testSmallGroups(self): components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64) dataset = dataset_ops.Dataset.from_tensor_slices(components).apply( grouping.group_by_window(lambda x: x % 2, lambda _, xs: xs.batch(4), 4)) get_next = self.getNext(dataset) self.assertAllEqual([0, 0, 0, 0], self.evaluate(get_next())) self.assertAllEqual([1, 1, 1, 1], self.evaluate(get_next())) # The small outputs at the end are deterministically produced in key # order. 
self.assertAllEqual([0, 0, 0], self.evaluate(get_next())) self.assertAllEqual([1], self.evaluate(get_next())) def testEmpty(self): dataset = dataset_ops.Dataset.range(4).apply( grouping.group_by_window(lambda _: 0, lambda _, xs: xs, 0)) get_next = self.getNext(dataset) with self.assertRaisesRegexp( errors.InvalidArgumentError, "Window size must be greater than zero, but got 0."): print(self.evaluate(get_next())) def testReduceFuncError(self): components = np.random.randint(100, size=(200,)).astype(np.int64) def reduce_func(_, xs): # Introduce an incorrect padded shape that cannot (currently) be # detected at graph construction time. return xs.padded_batch( 4, padded_shapes=(tensor_shape.TensorShape([]), constant_op.constant([5], dtype=dtypes.int64) * -1)) dataset = dataset_ops.Dataset.from_tensor_slices( components).map(lambda x: (x, ops.convert_to_tensor([x * x]))).apply( grouping.group_by_window(lambda x, _: x % 2, reduce_func, 32)) get_next = self.getNext(dataset) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(get_next()) def testConsumeWindowDatasetMoreThanOnce(self): components = np.random.randint(50, size=(200,)).astype(np.int64) def reduce_func(key, window): # Apply two different kinds of padding to the input: tight # padding, and quantized (to a multiple of 10) padding. return dataset_ops.Dataset.zip(( window.padded_batch( 4, padded_shapes=tensor_shape.TensorShape([None])), window.padded_batch( 4, padded_shapes=ops.convert_to_tensor([(key + 1) * 10])), )) dataset = dataset_ops.Dataset.from_tensor_slices( components ).map(lambda x: array_ops.fill([math_ops.cast(x, dtypes.int32)], x)).apply( grouping.group_by_window( lambda x: math_ops.cast(array_ops.shape(x)[0] // 10, dtypes.int64), reduce_func, 4)) get_next = self.getNext(dataset) counts = [] with self.assertRaises(errors.OutOfRangeError): while True: tight_result, multiple_of_10_result = self.evaluate(get_next()) self.assertEqual(0, multiple_of_10_result.shape[1] % 10) self.assertAllEqual(tight_result, multiple_of_10_result[:, :tight_result.shape[1]]) counts.append(tight_result.shape[0]) self.assertEqual(len(components), sum(counts)) def testShortCircuit(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply( grouping.group_by_window(lambda x: x, lambda _, window: window.batch(1), 1)) self.assertDatasetProduces( dataset, expected_output=[[i] for i in range(10)]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/group_by_window_test.py
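A hedged usage sketch of group_by_window in the style of testSmallGroups above: bucket int64 values by parity and batch each bucket's window of four, assuming TF 1.15 with eager execution enabled:

import numpy as np
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

components = np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], dtype=np.int64)
dataset = tf.data.Dataset.from_tensor_slices(components).apply(
    tf.data.experimental.group_by_window(
        key_func=lambda x: x % 2,              # bucket id: 0 even, 1 odd
        reduce_func=lambda _, window: window.batch(4),
        window_size=4))
print([batch.numpy().tolist() for batch in dataset])
# [[0, 0, 0, 0], [1, 1, 1, 1], [0, 0, 0], [1]]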
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.sleep()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.python.data.experimental.ops import sleep from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class SleepTest(test_base.DatasetTestBase): def testSleep(self): self.skipTest("b/123597912") sleep_microseconds = 100 dataset = dataset_ops.Dataset.range(10).apply( sleep.sleep(sleep_microseconds)) next_element = self.getNext(dataset) start_time = time.time() for i in range(10): self.assertEqual(i, self.evaluate(next_element())) end_time = time.time() self.assertGreater(end_time - start_time, (10 * sleep_microseconds) / 1e6) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/sleep_test.py
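A hedged sketch of the private sleep transformation the (currently skipped) test above targets; it only injects a fixed delay per element and leaves values unchanged. Assumes the TF 1.15 internal module path and eager execution:

import time

import tensorflow as tf
from tensorflow.python.data.experimental.ops import sleep

tf.compat.v1.enable_eager_execution()

sleep_microseconds = 1000  # 1 ms of extra latency per element
dataset = tf.data.Dataset.range(10).apply(sleep.sleep(sleep_microseconds))

start = time.time()
print([int(x) for x in dataset])                  # [0, 1, ..., 9]
print('elapsed: %.4f s' % (time.time() - start))  # includes ~10 ms of sleep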
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `_RebatchDataset` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized import numpy as np from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.experimental.ops import scan_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.lib.io import python_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def _flat_shapes(dataset): return nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)) @test_util.run_all_in_graph_and_eager_modes class RebatchDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): drop_remainder_cases = [("WithDropRemainder", True), ("WithoutDropRemainder", False)] @parameterized.named_parameters(drop_remainder_cases) def testBasic(self, drop_remainder): dataset = dataset_ops.Dataset.range(1024).batch( 32, drop_remainder=drop_remainder) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[8] if drop_remainder else [None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[k for k in range(i, i + 8)] for i in range(0, 1024, 8)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(rebatched_dataset, expected_output) def testScalarInputError(self): dataset = dataset_ops.Dataset.range(1024) distribute._RebatchDataset(dataset.batch(4), num_replicas=4) with self.assertRaisesRegexp(ValueError, "at least one dimension"): distribute._RebatchDataset(dataset, num_replicas=4) @parameterized.named_parameters(drop_remainder_cases) def testBatchNotDivisibleByNumReplicas(self, drop_remainder): dataset = dataset_ops.Dataset.range(1024).batch( 32, drop_remainder=drop_remainder) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=5) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [] i = 0 for _ in range(32): # number of steps # first four minibatches have seven elements for _ in range(4): expected_output.append([k for k in range(i, i + 7)]) i += 7 # last minibatch has four elements expected_output.append([k for k 
in range(i, i + 4)]) i += 4 self.assertDatasetProduces(rebatched_dataset, expected_output) def testBatchSizeNotDivisibleByNumReplicas2(self): dataset = dataset_ops.Dataset.range(32).batch(16, drop_remainder=True) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=5) # This will rebatch into sub-batches of size 4, since # ceil(16 / 5) = 4. However, that means only the first 4 replicas will get # data. expected_output = [[k for k in range(i, i + 4)] for i in range(0, 16, 4)] expected_output.extend([[]]) # Last replica gets an empty batch expected_output.extend( [[k for k in range(i, i + 4)] for i in range(16, 32, 4)]) expected_output.extend([[]]) # Last replica gets an empty batch self.assertDatasetProduces(rebatched_dataset, expected_output) def testTupleOutput(self): dataset = dataset_ops.Dataset.range(1024).map(lambda x: (x, x)).batch(32) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) expected_output = [([k for k in range(i, i + 8)], # pylint: disable=g-complex-comprehension [k for k in range(i, i + 8)]) for i in range(0, 1024, 8)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testNestedDictionaryOutput(self): dataset = dataset_ops.Dataset.range(1024).map( lambda x: {"a": x, "b": {"c": x}}).batch(32) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) expected_output = [{"a": [k for k in range(i, i + 8)], # pylint: disable=g-complex-comprehension "b": {"c": [k for k in range(i, i + 8)]}} for i in range(0, 1024, 8)] self.assertDatasetProduces(rebatched_dataset, expected_output) @parameterized.named_parameters(drop_remainder_cases) def testFinalPartialBatch(self, drop_remainder): dataset = dataset_ops.Dataset.range(1032).batch( 32, drop_remainder=drop_remainder) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[8] if drop_remainder else [None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) # if drop_remainder, the final partial batch is dropped, even though it # makes up a complete minibatch. expected_output = [[k for k in range(i, i + 8)] for i in range(0, 1024, 8)] # pylint: disable=g-complex-comprehension if not drop_remainder: # The last partial batch of size 8 is split over 4 replicas expected_output.extend( [[k for k in range(i, i + 2)] for i in range(1024, 1032, 2)]) self.assertDatasetProduces(rebatched_dataset, expected_output) @parameterized.named_parameters(drop_remainder_cases) def testFinalPartialBatchAfterRebatch(self, drop_remainder): dataset = dataset_ops.Dataset.range(34).batch( 32, drop_remainder=drop_remainder) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[8] if drop_remainder else [None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[k for k in range(i, i + 8)] for i in range(0, 32, 8)] # pylint: disable=g-complex-comprehension if not drop_remainder: # The last partial batch of size 2 is split over 4 replicas expected_output += [[32], [33], [], []] self.assertDatasetProduces(rebatched_dataset, expected_output) def testMultipleBatches(self): dataset = dataset_ops.Dataset.range(128).batch(4).batch(8) self.assertEqual([[None, None]], [ts.as_list() for ts in _flat_shapes(dataset)]) # Each element is a list of 8 elements where each element is a list of 4. 
expected_output = [[[j, j + 1, j + 2, j + 3] # pylint: disable=g-complex-comprehension for j in range(i, i + 32, 4)] # generates 8 elements for i in range(0, 128, 32)] self.assertDatasetProduces(dataset, expected_output) rebatched_dataset = distribute._RebatchDataset(dataset, 4) self.assertEqual([[None, None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) # Each element is a list of 2 elements where each element is a list of 4. expected_output = [[[j, j + 1, j + 2, j + 3] # pylint: disable=g-complex-comprehension for j in range(i, i + 8, 4)] # generates 2 elements for i in range(0, 128, 8)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testMapAndBatch(self): dataset = dataset_ops.Dataset.range(1024).apply( batching.map_and_batch(math_ops.square, 32)) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[k**2 for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension for i in range(0, 1024, 8)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testMapAndBatchWithCapturedInput(self): captured_t = variables.Variable(42) dataset = dataset_ops.Dataset.range(1024).apply( batching.map_and_batch(lambda x: captured_t, 32)) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[42 for _ in range(i, i + 8)] # pylint: disable=g-complex-comprehension for i in range(0, 1024, 8)] self.evaluate(variables.global_variables_initializer()) self.assertDatasetProduces( rebatched_dataset, expected_output, requires_initialization=True) def testPaddedBatch(self): dataset = dataset_ops.Dataset.range(128).batch( 4, drop_remainder=True).padded_batch( 8, padded_shapes=[5]) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) # Each element is a list of 8 elements in which each element is a list of 5 # elements, first four are numbers and the last one is a padded zero. expected_output = [[[j, j + 1, j + 2, j + 3, 0] # pylint: disable=g-complex-comprehension for j in range(i, i + 32, 4)] # generates 8 elements for i in range(0, 128, 32)] self.assertDatasetProduces(dataset, expected_output) self.assertEqual([[None, 5]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) # Each element is a list of 2 elements in which each element is a list of 5 # elements, first four are numbers and the last one is a padded zero. 
expected_output = [[[j, j + 1, j + 2, j + 3, 0] # pylint: disable=g-complex-comprehension for j in range(i, i + 8, 4)] # generates 2 elements for i in range(0, 128, 8)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testConcatenate(self): dataset1 = dataset_ops.Dataset.range(64).batch(8) dataset2 = dataset_ops.Dataset.range(32).batch(8) dataset = dataset1.concatenate(dataset2) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = ([[i, i + 1] for i in range(0, 64, 2)] + [[i, i + 1] for i in range(0, 32, 2)]) self.assertDatasetProduces(rebatched_dataset, expected_output) def testConcatenateDifferentShapes(self): dataset1 = dataset_ops.Dataset.range(64).batch(16) dataset2 = dataset_ops.Dataset.range(32).batch(8) dataset = dataset1.concatenate(dataset2) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = ([[i, i + 1, i + 2, i + 3] for i in range(0, 64, 4)] + [[i, i + 1] for i in range(0, 32, 2)]) self.assertDatasetProduces(rebatched_dataset, expected_output) def testZip(self): dataset1 = dataset_ops.Dataset.range(64).batch(8) dataset2 = dataset_ops.Dataset.range(32).batch(8) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None], [None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [([i, i + 1], [i, i + 1]) for i in range(0, 32, 2)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testZipDifferentShapes(self): dataset1 = dataset_ops.Dataset.range(64).batch(16) dataset2 = dataset_ops.Dataset.range(32).batch(8) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None], [None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [([2 * i, 2 * i + 1, 2 * i + 2, 2 * i + 3], [i, i + 1]) for i in range(0, 32, 2)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testFlatMapBatching(self): dataset = dataset_ops.Dataset.range(2).flat_map( lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda 32)) # Two elements where each element is range(32) expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(dataset, expected_output) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) # Two elements where each element is a list of 4 elements where each element # is a list of 8. 
expected_output = [[k for k in range(i, i + 8)] # pylint: disable=g-complex-comprehension for _ in range(2) for i in range(0, 32, 8)] # generates 4 elements self.assertDatasetProduces(rebatched_dataset, expected_output) def testInterleaveBatching(self): dataset = dataset_ops.Dataset.range(2).interleave( lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda 32), cycle_length=2) # Two elements where each element is range(32) expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(dataset, expected_output) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[k for k in range(i, i + 8)] for i in range(0, 32, 8)] expected_output += expected_output self.assertDatasetProduces(rebatched_dataset, expected_output) def testParallelInterleaveBatching(self): dataset = dataset_ops.Dataset.range(2).interleave( lambda _: dataset_ops.Dataset.range(32).batch( # pylint: disable=g-long-lambda 32), cycle_length=2, num_parallel_calls=2) # Two elements where each element is range(32) expected_output = [[k for k in range(32)] for _ in range(2)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(dataset, expected_output) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [[k for k in range(i, i + 8)] for i in range(0, 32, 8)] expected_output += expected_output self.assertDatasetProduces(rebatched_dataset, expected_output) def testGroupByWindowStaticBatch(self): dataset = dataset_ops.Dataset.from_tensor_slices( [[array_ops.constant(i, dtype=dtypes.int64)] * 3 for i in range(40)]) reduce_fn = lambda bucket_id, ds: ds.batch( # pylint: disable=g-long-lambda batch_size=10) dataset = dataset.apply( grouping.group_by_window( key_func=lambda x: x[0] % 4, reduce_func=reduce_fn, window_size=10)) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=2) self.assertEqual([[None, 3]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) # pylint: disable=g-complex-comprehension expected_output = [[[j + i * 4 + k * 20] * 3 for i in range(5)] for j in range(4) for k in range(2)] self.assertDatasetProduces(rebatched_dataset, expected_output) def testGroupByWindowDynamicBatch(self): # {0, 1, 0, 1, ...} dataset = dataset_ops.Dataset.range(40).map(lambda x: x % 2) def reduce_fn(key, ds): # key == 0 -> .batch(5) # key == 1 -> .batch(10) return ds.batch(batch_size=(key + 1) * 5) dataset = dataset.apply( grouping.group_by_window( key_func=lambda x: x, reduce_func=reduce_fn, window_size=10)) dataset = distribute._RebatchDataset(dataset, num_replicas=2) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(dataset)]) # The batches of 5 (value == 0) will be split into minibatches of (3, 2) and # the batches of 10 (value == 1) split into minibatches of (5, 5) # [(batch_size, value), ...] 
pairs = [(3, 0), (2, 0), (3, 0), (2, 0), (5, 1), (5, 1)] pairs = pairs * 2 expected_output = [[value] * batch_size for batch_size, value in pairs] self.assertDatasetProduces(dataset, expected_output) def testGroupByWindowDynamicBatchWithPartialBatch(self): # {0, 1, 0, 1, ...} dataset = dataset_ops.Dataset.range(40).map(lambda x: x % 2) def reduce_fn(key, ds): # key == 0 -> .batch(5) # key == 1 -> .batch(10) return ds.batch(batch_size=(key + 1) * 5) dataset = dataset.apply( grouping.group_by_window( key_func=lambda x: x, reduce_func=reduce_fn, window_size=11)) dataset = distribute._RebatchDataset(dataset, num_replicas=2) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(dataset)]) pairs = [(3, 0), (2, 0), (3, 0), (2, 0), (1, 0), (0, 0), (5, 1), (5, 1), (1, 1), (0, 1), (3, 0), (2, 0), (2, 0), (2, 0), (5, 1), (4, 1)] expected_output = [[value] * batch_size for batch_size, value in pairs] self.assertDatasetProduces(dataset, expected_output) def testGroupByWindowDynamicBatchWithPartialBatchWithDropRemainder(self): # This test exercises nested batch functionality, dynamic batch size # and drop_remainder=True together. dataset = dataset_ops.Dataset.range(40).map(lambda x: x % 2) def reduce_fn(key, ds): # key == 0 -> .batch(5) # key == 1 -> .batch(10) return ds.batch(batch_size=(key + 1) * 5, drop_remainder=True) dataset = dataset.apply( grouping.group_by_window( key_func=lambda x: x, reduce_func=reduce_fn, window_size=11)) dataset = distribute._RebatchDataset(dataset, num_replicas=2) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(dataset)]) # The batches of 5 (value == 0) will be split into minibatches of (3, 2) and # the batches of 10 (value == 1) split into minibatches of (5, 5) # [(batch_size, value), ...] pairs = [(3, 0), (2, 0), (3, 0), (2, 0), (5, 1), (5, 1), (3, 0), (2, 0)] expected_output = [[value] * batch_size for batch_size, value in pairs] self.assertDatasetProduces(dataset, expected_output) def testScanAfterBatch(self): dataset = dataset_ops.Dataset.range(40).batch(10).apply( scan_ops.scan(np.int64(2), lambda state, value: (state, value * state))) dataset = distribute._RebatchDataset(dataset, num_replicas=2) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(dataset)]) expected_output = [[i * 2 for i in range(j*5, (j+1)*5)] for j in range(8)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(dataset, expected_output) def testMakeBatchedFeaturesDataset(self): # Set up fn = os.path.join(self.get_temp_dir(), "tf_record.txt") writer = python_io.TFRecordWriter(fn) for i in range(1024): writer.write( example_pb2.Example( features=feature_pb2.Features( feature={ "value": feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[i])) })).SerializeToString()) writer.close() dataset = readers.make_batched_features_dataset( file_pattern=fn, batch_size=32, features={"value": parsing_ops.FixedLenFeature([], dtypes.int64)}, shuffle=False, num_epochs=1, drop_final_batch=False) rebatched_dataset = distribute._RebatchDataset(dataset, num_replicas=4) self.assertEqual([[None]], [ts.as_list() for ts in _flat_shapes(rebatched_dataset)]) expected_output = [{ "value": [k for k in range(i, i + 8)] } for i in range(0, 1024, 8)] # pylint: disable=g-complex-comprehension self.assertDatasetProduces(rebatched_dataset, expected_output) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/rebatch_dataset_test.py
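A hedged sketch of the private _RebatchDataset transformation exercised above: a batch of 32 is divided into per-replica minibatches of 8 when num_replicas=4. Assumes the TF 1.15 internal module path and eager execution:

import tensorflow as tf
from tensorflow.python.data.experimental.ops import distribute

tf.compat.v1.enable_eager_execution()

dataset = tf.data.Dataset.range(64).batch(32, drop_remainder=True)
rebatched = distribute._RebatchDataset(dataset, num_replicas=4)
print([batch.numpy().tolist() for batch in rebatched][:2])
# [[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]]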
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for testing `tf.data.experimental.SqlDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sqlite3 from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.platform import test class SqlDatasetTestBase(test_base.DatasetTestBase): """Base class for setting up and testing SqlDataset.""" def _createSqlDataset(self, query, output_types, driver_name="sqlite", num_repeats=1): dataset = readers.SqlDataset(driver_name, self.data_source_name, query, output_types).repeat(num_repeats) return dataset def setUp(self): self.data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite") conn = sqlite3.connect(self.data_source_name) c = conn.cursor() c.execute("DROP TABLE IF EXISTS students") c.execute("DROP TABLE IF EXISTS people") c.execute("DROP TABLE IF EXISTS townspeople") c.execute( "CREATE TABLE IF NOT EXISTS students (id INTEGER NOT NULL PRIMARY KEY, " "first_name VARCHAR(100), last_name VARCHAR(100), motto VARCHAR(100), " "school_id VARCHAR(100), favorite_nonsense_word VARCHAR(100), " "desk_number INTEGER, income INTEGER, favorite_number INTEGER, " "favorite_big_number INTEGER, favorite_negative_number INTEGER, " "favorite_medium_sized_number INTEGER, brownie_points INTEGER, " "account_balance INTEGER, registration_complete INTEGER)") c.executemany( "INSERT INTO students (first_name, last_name, motto, school_id, " "favorite_nonsense_word, desk_number, income, favorite_number, " "favorite_big_number, favorite_negative_number, " "favorite_medium_sized_number, brownie_points, account_balance, " "registration_complete) " "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", [("John", "Doe", "Hi!", "123", "n\0nsense", 9, 0, 2147483647, 9223372036854775807, -2, 32767, 0, 0, 1), ("Jane", "Moe", "Hi again!", "1000", "nonsense\0", 127, -20000, -2147483648, -9223372036854775808, -128, -32768, 255, 65535, 0)]) c.execute( "CREATE TABLE IF NOT EXISTS people (id INTEGER NOT NULL PRIMARY KEY, " "first_name VARCHAR(100), last_name VARCHAR(100), state VARCHAR(100))") c.executemany( "INSERT INTO PEOPLE (first_name, last_name, state) VALUES (?, ?, ?)", [("Benjamin", "Franklin", "Pennsylvania"), ("John", "Doe", "California")]) c.execute( "CREATE TABLE IF NOT EXISTS townspeople (id INTEGER NOT NULL PRIMARY " "KEY, first_name VARCHAR(100), last_name VARCHAR(100), victories " "FLOAT, accolades FLOAT, triumphs FLOAT)") c.executemany( "INSERT INTO townspeople (first_name, last_name, victories, " "accolades, triumphs) VALUES (?, ?, ?, ?, ?)", [("George", "Washington", 20.00, 1331241.321342132321324589798264627463827647382647382643874, 9007199254740991.0), ("John", "Adams", -19.95, 1331241321342132321324589798264627463827647382647382643874.0, 9007199254740992.0)]) 
    conn.commit()
    conn.close()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test_base.py
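For reference, a minimal usage sketch of `tf.data.experimental.SqlDataset`, the API the base class above drives. The database path, table, and query are placeholders, and eager execution in TF 1.15 is assumed; this is a sketch, not part of the test suite.

import sqlite3
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Build a tiny throwaway sqlite database (hypothetical path and schema).
db_path = "/tmp/example.sqlite"
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute("CREATE TABLE IF NOT EXISTS people "
          "(id INTEGER PRIMARY KEY, first_name TEXT, state TEXT)")
c.execute("INSERT INTO people (first_name, state) VALUES ('Benjamin', 'Pennsylvania')")
conn.commit()
conn.close()

# Read rows back as a tf.data pipeline; output_types must match the SELECT columns.
dataset = tf.data.experimental.SqlDataset(
    "sqlite", db_path, "SELECT first_name, state FROM people",
    (tf.string, tf.string))
for first_name, state in dataset:
  print(first_name.numpy(), state.numpy())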
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.Counter`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import counter from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class CounterTest(test_base.DatasetTestBase): def testCounter(self): """Test dataset construction using `count`.""" dataset = counter.Counter(start=3, step=4) self.assertEqual( [], dataset_ops.get_legacy_output_shapes(dataset).as_list()) self.assertEqual(dtypes.int64, dataset_ops.get_legacy_output_types(dataset)) get_next = self.getNext(dataset) negative_dataset = counter.Counter(start=0, step=-1) negative_get_next = self.getNext(negative_dataset) self.assertEqual(3, self.evaluate(get_next())) self.assertEqual(3 + 4, self.evaluate(get_next())) self.assertEqual(3 + 2 * 4, self.evaluate(get_next())) self.assertEqual(0, self.evaluate(negative_get_next())) self.assertEqual(-1, self.evaluate(negative_get_next())) self.assertEqual(-2, self.evaluate(negative_get_next())) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/counter_test.py
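A small sketch of `tf.data.experimental.Counter` as exercised by the test above; eager execution is assumed, and `take(3)` is only there to make the otherwise unbounded dataset finite.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Counter(start=3, step=4) yields 3, 7, 11, ... indefinitely.
dataset = tf.data.experimental.Counter(start=3, step=4).take(3)
print([int(x) for x in dataset])  # expected: [3, 7, 11]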
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for testing reader datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import zlib from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.framework import dtypes from tensorflow.python.lib.io import python_io from tensorflow.python.ops import parsing_ops from tensorflow.python.util import compat class FixedLengthRecordDatasetTestBase(test_base.DatasetTestBase): """Base class for setting up and testing FixedLengthRecordDataset.""" def setUp(self): super(FixedLengthRecordDatasetTestBase, self).setUp() self._num_files = 2 self._num_records = 7 self._header_bytes = 5 self._record_bytes = 3 self._footer_bytes = 2 def _record(self, f, r): return compat.as_bytes(str(f * 2 + r) * self._record_bytes) def _createFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i) filenames.append(fn) with open(fn, "wb") as f: f.write(b"H" * self._header_bytes) for j in range(self._num_records): f.write(self._record(i, j)) f.write(b"F" * self._footer_bytes) return filenames class MakeBatchedFeaturesDatasetTestBase(test_base.DatasetTestBase): """Base class for setting up and testing `make_batched_features_dataset`.""" def setUp(self): super(MakeBatchedFeaturesDatasetTestBase, self).setUp() self._num_files = 2 self._num_records = 7 self.test_filenames = self._createFiles() def make_batch_feature(self, filenames, num_epochs, batch_size, label_key=None, reader_num_threads=1, parser_num_threads=1, shuffle=False, shuffle_seed=None, drop_final_batch=False): self.filenames = filenames self.num_epochs = num_epochs self.batch_size = batch_size return readers.make_batched_features_dataset( file_pattern=self.filenames, batch_size=self.batch_size, features={ "file": parsing_ops.FixedLenFeature([], dtypes.int64), "record": parsing_ops.FixedLenFeature([], dtypes.int64), "keywords": parsing_ops.VarLenFeature(dtypes.string), "label": parsing_ops.FixedLenFeature([], dtypes.string), }, label_key=label_key, reader=core_readers.TFRecordDataset, num_epochs=self.num_epochs, shuffle=shuffle, shuffle_seed=shuffle_seed, reader_num_threads=reader_num_threads, parser_num_threads=parser_num_threads, drop_final_batch=drop_final_batch) def _record(self, f, r, l): example = example_pb2.Example( features=feature_pb2.Features( feature={ "file": feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[f])), "record": feature_pb2.Feature( int64_list=feature_pb2.Int64List(value=[r])), "keywords": feature_pb2.Feature( bytes_list=feature_pb2.BytesList( 
value=self._get_keywords(f, r))), "label": feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[compat.as_bytes(l)])) })) return example.SerializeToString() def _get_keywords(self, f, r): num_keywords = 1 + (f + r) % 2 keywords = [] for index in range(num_keywords): keywords.append(compat.as_bytes("keyword%d" % index)) return keywords def _sum_keywords(self, num_files): sum_keywords = 0 for i in range(num_files): for j in range(self._num_records): sum_keywords += 1 + (i + j) % 2 return sum_keywords def _createFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i) filenames.append(fn) writer = python_io.TFRecordWriter(fn) for j in range(self._num_records): writer.write(self._record(i, j, "fake-label")) writer.close() return filenames def _run_actual_batch(self, outputs, label_key_provided=False): if label_key_provided: # outputs would be a tuple of (feature dict, label) features, label = self.evaluate(outputs()) else: features = self.evaluate(outputs()) label = features["label"] file_out = features["file"] keywords_indices = features["keywords"].indices keywords_values = features["keywords"].values keywords_dense_shape = features["keywords"].dense_shape record = features["record"] return ([ file_out, keywords_indices, keywords_values, keywords_dense_shape, record, label ]) def _next_actual_batch(self, label_key_provided=False): return self._run_actual_batch(self.outputs, label_key_provided) def _interleave(self, iterators, cycle_length): pending_iterators = iterators open_iterators = [] num_open = 0 for i in range(cycle_length): if pending_iterators: open_iterators.append(pending_iterators.pop(0)) num_open += 1 while num_open: for i in range(min(cycle_length, len(open_iterators))): if open_iterators[i] is None: continue try: yield next(open_iterators[i]) except StopIteration: if pending_iterators: open_iterators[i] = pending_iterators.pop(0) else: open_iterators[i] = None num_open -= 1 def _next_expected_batch(self, file_indices, batch_size, num_epochs, cycle_length=1): def _next_record(file_indices): for j in file_indices: for i in range(self._num_records): yield j, i, compat.as_bytes("fake-label") def _next_record_interleaved(file_indices, cycle_length): return self._interleave([_next_record([i]) for i in file_indices], cycle_length) file_batch = [] keywords_batch_indices = [] keywords_batch_values = [] keywords_batch_max_len = 0 record_batch = [] batch_index = 0 label_batch = [] for _ in range(num_epochs): if cycle_length == 1: next_records = _next_record(file_indices) else: next_records = _next_record_interleaved(file_indices, cycle_length) for record in next_records: f = record[0] r = record[1] label_batch.append(record[2]) file_batch.append(f) record_batch.append(r) keywords = self._get_keywords(f, r) keywords_batch_values.extend(keywords) keywords_batch_indices.extend( [[batch_index, i] for i in range(len(keywords))]) batch_index += 1 keywords_batch_max_len = max(keywords_batch_max_len, len(keywords)) if len(file_batch) == batch_size: yield [ file_batch, keywords_batch_indices, keywords_batch_values, [batch_size, keywords_batch_max_len], record_batch, label_batch ] file_batch = [] keywords_batch_indices = [] keywords_batch_values = [] keywords_batch_max_len = 0 record_batch = [] batch_index = 0 label_batch = [] if file_batch: yield [ file_batch, keywords_batch_indices, keywords_batch_values, [len(file_batch), keywords_batch_max_len], record_batch, label_batch ] def verify_records(self, batch_size, 
file_index=None, num_epochs=1, label_key_provided=False, interleave_cycle_length=1): if file_index is not None: file_indices = [file_index] else: file_indices = range(self._num_files) for expected_batch in self._next_expected_batch( file_indices, batch_size, num_epochs, cycle_length=interleave_cycle_length): actual_batch = self._next_actual_batch( label_key_provided=label_key_provided) for i in range(len(expected_batch)): self.assertAllEqual(expected_batch[i], actual_batch[i]) class TextLineDatasetTestBase(test_base.DatasetTestBase): """Base class for setting up and testing TextLineDataset.""" def _lineText(self, f, l): return compat.as_bytes("%d: %d" % (f, l)) def _createFiles(self, num_files, num_lines, crlf=False, compression_type=None): filenames = [] for i in range(num_files): fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i) filenames.append(fn) contents = [] for j in range(num_lines): contents.append(self._lineText(i, j)) # Always include a newline after the record unless it is # at the end of the file, in which case we include it if j + 1 != num_lines or i == 0: contents.append(b"\r\n" if crlf else b"\n") contents = b"".join(contents) if not compression_type: with open(fn, "wb") as f: f.write(contents) elif compression_type == "GZIP": with gzip.GzipFile(fn, "wb") as f: f.write(contents) elif compression_type == "ZLIB": contents = zlib.compress(contents) with open(fn, "wb") as f: f.write(contents) else: raise ValueError("Unsupported compression_type", compression_type) return filenames class TFRecordDatasetTestBase(test_base.DatasetTestBase): """Base class for setting up and testing TFRecordDataset.""" def _interleave(self, iterators, cycle_length): pending_iterators = iterators open_iterators = [] num_open = 0 for i in range(cycle_length): if pending_iterators: open_iterators.append(pending_iterators.pop(0)) num_open += 1 while num_open: for i in range(min(cycle_length, len(open_iterators))): if open_iterators[i] is None: continue try: yield next(open_iterators[i]) except StopIteration: if pending_iterators: open_iterators[i] = pending_iterators.pop(0) else: open_iterators[i] = None num_open -= 1 def _next_expected_batch(self, file_indices, batch_size, num_epochs, cycle_length, drop_final_batch, use_parser_fn): def _next_record(file_indices): for j in file_indices: for i in range(self._num_records): yield j, i def _next_record_interleaved(file_indices, cycle_length): return self._interleave([_next_record([i]) for i in file_indices], cycle_length) record_batch = [] batch_index = 0 for _ in range(num_epochs): if cycle_length == 1: next_records = _next_record(file_indices) else: next_records = _next_record_interleaved(file_indices, cycle_length) for f, r in next_records: record = self._record(f, r) if use_parser_fn: record = record[1:] record_batch.append(record) batch_index += 1 if len(record_batch) == batch_size: yield record_batch record_batch = [] batch_index = 0 if record_batch and not drop_final_batch: yield record_batch def _verify_records(self, outputs, batch_size, file_index, num_epochs, interleave_cycle_length, drop_final_batch, use_parser_fn): if file_index is not None: if isinstance(file_index, list): file_indices = file_index else: file_indices = [file_index] else: file_indices = range(self._num_files) for expected_batch in self._next_expected_batch( file_indices, batch_size, num_epochs, interleave_cycle_length, drop_final_batch, use_parser_fn): actual_batch = self.evaluate(outputs()) self.assertAllEqual(expected_batch, actual_batch) def setUp(self): 
    super(TFRecordDatasetTestBase, self).setUp()
    self._num_files = 2
    self._num_records = 7
    self.test_filenames = self._createFiles()

  def _record(self, f, r):
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _createFiles(self):
    filenames = []
    for i in range(self._num_files):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for j in range(self._num_records):
        writer.write(self._record(i, j))
      writer.close()
    return filenames
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/reader_dataset_ops_test_base.py
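To illustrate the kind of files these reader test bases generate, a hedged sketch that writes and reads back a tiny TFRecord file in the spirit of `_createFiles`; the file name is a placeholder and eager execution is assumed.

import os
import tempfile

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Write three small records to a hypothetical TFRecord file.
path = os.path.join(tempfile.gettempdir(), "demo.tfrecord")
with tf.io.TFRecordWriter(path) as writer:
  for i in range(3):
    writer.write(("Record %d" % i).encode("utf-8"))

# Read them back through the tf.data reader the tests exercise.
dataset = tf.data.TFRecordDataset([path])
print([r.numpy() for r in dataset])  # [b'Record 0', b'Record 1', b'Record 2']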
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `MatchingFilesDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import shutil import tempfile from tensorflow.python.data.experimental.ops import matching_files from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class MatchingFilesDatasetTest(test_base.DatasetTestBase): def setUp(self): self.tmp_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmp_dir, ignore_errors=True) def _touchTempFiles(self, filenames): for filename in filenames: open(os.path.join(self.tmp_dir, filename), 'a').close() def testNonExistingDirectory(self): """Test the MatchingFiles dataset with a non-existing directory.""" self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir') dataset = matching_files.MatchingFilesDataset( os.path.join(self.tmp_dir, '*')) self.assertDatasetProduces( dataset, expected_error=(errors.NotFoundError, '')) def testEmptyDirectory(self): """Test the MatchingFiles dataset with an empty directory.""" dataset = matching_files.MatchingFilesDataset( os.path.join(self.tmp_dir, '*')) self.assertDatasetProduces( dataset, expected_error=(errors.NotFoundError, '')) def testSimpleDirectory(self): """Test the MatchingFiles dataset with a simple directory.""" filenames = ['a', 'b', 'c'] self._touchTempFiles(filenames) dataset = matching_files.MatchingFilesDataset( os.path.join(self.tmp_dir, '*')) self.assertDatasetProduces( dataset, expected_output=[ compat.as_bytes(os.path.join(self.tmp_dir, filename)) for filename in filenames ], assert_items_equal=True) def testFileSuffixes(self): """Test the MatchingFiles dataset using the suffixes of filename.""" filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc'] self._touchTempFiles(filenames) dataset = matching_files.MatchingFilesDataset( os.path.join(self.tmp_dir, '*.py')) self.assertDatasetProduces( dataset, expected_output=[ compat.as_bytes(os.path.join(self.tmp_dir, filename)) for filename in filenames[1:-1] ], assert_items_equal=True) def testFileMiddles(self): """Test the MatchingFiles dataset using the middles of filename.""" filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc'] self._touchTempFiles(filenames) dataset = matching_files.MatchingFilesDataset( os.path.join(self.tmp_dir, 'b*.py*')) self.assertDatasetProduces( dataset, expected_output=[ compat.as_bytes(os.path.join(self.tmp_dir, filename)) for filename in filenames[1:3] ], assert_items_equal=True) def testNestedDirectories(self): """Test the MatchingFiles dataset with nested directories.""" filenames = [] width = 8 depth = 4 for i in range(width): for j in range(depth): new_base 
= os.path.join(self.tmp_dir, str(i), *[str(dir_name) for dir_name in range(j)]) os.makedirs(new_base) child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log'] for f in child_files: filename = os.path.join(new_base, f) filenames.append(filename) open(filename, 'w').close() patterns = [ os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]), suffix) for suffix in ['*.txt', '*.log'] ] dataset = matching_files.MatchingFilesDataset(patterns) next_element = self.getNext(dataset) expected_filenames = [ compat.as_bytes(filename) for filename in filenames if filename.endswith('.txt') or filename.endswith('.log') ] actual_filenames = [] while True: try: actual_filenames.append(compat.as_bytes(self.evaluate(next_element()))) except errors.OutOfRangeError: break self.assertItemsEqual(expected_filenames, actual_filenames) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/matching_files_test.py
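The dataset under test above is private; a comparable public way to glob files into a dataset is `tf.data.Dataset.list_files`. A minimal sketch, assuming eager execution and a throwaway temporary directory:

import os
import tempfile

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Create a few placeholder files to match against.
tmp_dir = tempfile.mkdtemp()
for name in ("a.py", "b.py", "c.txt"):
  open(os.path.join(tmp_dir, name), "w").close()

# Glob only the .py files into a dataset of filename strings.
dataset = tf.data.Dataset.list_files(os.path.join(tmp_dir, "*.py"), shuffle=False)
print(sorted(f.numpy() for f in dataset))  # the two .py files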
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.bucket_by_sequence_length().""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random from absl.testing import parameterized from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import test def _element_length_fn(x, y=None): del y return array_ops.shape(x)[0] def _to_sparse_tensor(record): return sparse_tensor.SparseTensor(**record) def _format_record(array, sparse): if sparse: return { "values": array, "indices": [[i] for i in range(len(array))], "dense_shape": (len(array),) } return array def _get_record_type(sparse): if sparse: return { "values": dtypes.int64, "indices": dtypes.int64, "dense_shape": dtypes.int64 } return dtypes.int32 def _get_record_shape(sparse): if sparse: return { "values": tensor_shape.TensorShape([None,]), "indices": tensor_shape.TensorShape([None, 1]), "dense_shape": tensor_shape.TensorShape([1,]) } return tensor_shape.TensorShape([None]) @test_util.run_all_in_graph_and_eager_modes class BucketBySequenceLengthTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("WithoutPadding", True), ("WithPadding", False), ) def testBucketDropReminder(self, param_no_padding): boundaries = [10, 20, 30] batch_sizes = [10, 8, 4, 2] lengths = [8, 13, 25, 35] n_bucket_elements = [28, 7, 6, 5] n_expected_batches = 5 # Expected sequence lengths of the individual batches. expected_lengths = [] # Expected sum of all batches with an equal sequence length. # <seq-length>: <expected-total-sum> expected_sums = {} # Expected batch sizes of batches depending on the sequence length. # <seq-length>: [batch1_size, ..., batchN_size] expected_batch_sizes = {} for length, batch_size, bucket_elements in zip(lengths, batch_sizes, n_bucket_elements): # Calculate the expected sum across all batches of a specific sequence length. expected_sums[length] = \ (bucket_elements - bucket_elements % batch_size) * length # Calculate the expected occurrence of individual batch sizes. expected_batch_sizes[length] = \ [batch_size] * (bucket_elements // batch_size) # Calculate the expected occurence of individual sequence lengths. 
expected_lengths.extend([length] * (bucket_elements // batch_size)) def build_dataset(sparse): def _generator(): # Produce 1 batch for each bucket elements = [] for bucket_elements, length in zip(n_bucket_elements, lengths): # Using only full sequences (opposed to the strategy employed in `testBucket`) makes # checking the sum a lot easier. record_len = length for _ in range(bucket_elements): elements.append([1] * record_len) random.shuffle(elements) for el in elements: yield (_format_record(el, sparse),) dataset = dataset_ops.Dataset.from_generator( _generator, (_get_record_type(sparse),), (_get_record_shape(sparse),)) if sparse: dataset = dataset.map(lambda x: (_to_sparse_tensor(x),)) return dataset def _test_bucket_by_padding(no_padding): dataset = build_dataset(sparse=no_padding) dataset = dataset.apply( grouping.bucket_by_sequence_length( _element_length_fn, boundaries, batch_sizes, no_padding=no_padding, drop_remainder=True)) get_next = self.getNext(dataset) batches = [] for _ in range(n_expected_batches): batch, = self.evaluate(get_next()) batches.append(batch) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) generated_lengths = [] # <seq-length>: <total-sum> generated_sums = {} # <seq-length>: [<batch_size>, ...] generated_batch_sizes = {} for length, batch_size, bucket_elements in zip(lengths, batch_sizes, n_bucket_elements): # Initialize the sum across all batches. generated_sums[length] = 0 # Initialize the individual batch sizes. generated_batch_sizes[length] = [] for batch in batches: shape = batch.dense_shape if no_padding else batch.shape length = shape[1] generated_lengths.append(length) batch_size = shape[0] generated_batch_sizes[length].append(batch_size) batch_sum = batch.values.sum() if no_padding else batch.sum() generated_sums[length] += batch_sum for l in lengths: # Make sure the sum of the batch contents is correct for the individual sequence lengths. self.assertEqual( generated_sums[l], expected_sums[l], "Tensor sums did not match! " "expected: {}, generated: {}".format(expected_sums, generated_sums)) # Make sure the individual batch sizes are generated as expected. self.assertEqual( sorted(generated_batch_sizes[l]), sorted(expected_batch_sizes[l]), "Batch-sizes did not match! " "expected: {}, generated: {}".format( sorted(expected_batch_sizes[l]), sorted(generated_batch_sizes[l]))) # Make sure the generated sequence lengths appear as often as expected. self.assertEqual( sorted(generated_lengths), sorted(expected_lengths), "The generated sequence lengths did not match! 
" "expected: {}, generated: {}".format( sorted(expected_lengths), sorted(generated_lengths))) _test_bucket_by_padding(param_no_padding) @parameterized.named_parameters( ("WithoutPadding", True), ("WithPadding", False), ) def testBucket(self, param_no_padding): boundaries = [10, 20, 30] batch_sizes = [10, 8, 4, 2] lengths = [8, 13, 25, 35] def build_dataset(sparse): def _generator(): # Produce 1 batch for each bucket elements = [] for batch_size, length in zip(batch_sizes, lengths): record_len = length - 1 for _ in range(batch_size): elements.append([1] * record_len) record_len = length random.shuffle(elements) for el in elements: yield (_format_record(el, sparse),) dataset = dataset_ops.Dataset.from_generator( _generator, (_get_record_type(sparse),), (_get_record_shape(sparse),)) if sparse: dataset = dataset.map(lambda x: (_to_sparse_tensor(x),)) return dataset def _test_bucket_by_padding(no_padding): dataset = build_dataset(sparse=no_padding) dataset = dataset.apply( grouping.bucket_by_sequence_length( _element_length_fn, boundaries, batch_sizes, no_padding=no_padding)) get_next = self.getNext(dataset) batches = [] for _ in range(4): batch, = self.evaluate(get_next()) batches.append(batch) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) batch_sizes_val = [] lengths_val = [] for batch in batches: shape = batch.dense_shape if no_padding else batch.shape batch_size = shape[0] length = shape[1] batch_sizes_val.append(batch_size) lengths_val.append(length) if not context.executing_eagerly(): sum_check = batch.values.sum() if no_padding else batch.sum() self.assertEqual(sum_check, batch_size * length - 1) self.assertEqual(sum(batch_sizes_val), sum(batch_sizes)) self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val)) self.assertEqual(sorted(lengths), sorted(lengths_val)) _test_bucket_by_padding(param_no_padding) def testPadToBoundary(self): boundaries = [10, 20, 30] batch_sizes = [10, 8, 4, 2] lengths = [8, 13, 25] def element_gen(): # Produce 1 batch for each bucket elements = [] for batch_size, length in zip(batch_sizes[:-1], lengths): for _ in range(batch_size): elements.append([1] * length) random.shuffle(elements) for el in elements: yield (el,) for _ in range(batch_sizes[-1]): el = [1] * (boundaries[-1] + 5) yield (el,) element_len = lambda el: array_ops.shape(el)[0] dataset = dataset_ops.Dataset.from_generator( element_gen, (dtypes.int64,), ([None],)).apply( grouping.bucket_by_sequence_length( element_len, boundaries, batch_sizes, pad_to_bucket_boundary=True)) get_next = self.getNext(dataset) batches = [] for _ in range(3): batch, = self.evaluate(get_next()) batches.append(batch) with self.assertRaisesOpError("bucket_boundaries"): self.evaluate(get_next()) batch_sizes_val = [] lengths_val = [] for batch in batches: batch_size = batch.shape[0] length = batch.shape[1] batch_sizes_val.append(batch_size) lengths_val.append(length) batch_sizes = batch_sizes[:-1] self.assertEqual(sum(batch_sizes_val), sum(batch_sizes)) self.assertEqual(sorted(batch_sizes), sorted(batch_sizes_val)) self.assertEqual([boundary - 1 for boundary in sorted(boundaries)], sorted(lengths_val)) def testPadToBoundaryNoExtraneousPadding(self): boundaries = [3, 7, 11] batch_sizes = [2, 2, 2, 2] lengths = range(1, 11) def element_gen(): for length in lengths: yield ([1] * length,) element_len = lambda element: array_ops.shape(element)[0] dataset = dataset_ops.Dataset.from_generator( element_gen, (dtypes.int64,), ([None],)).apply( grouping.bucket_by_sequence_length( element_len, boundaries, 
batch_sizes, pad_to_bucket_boundary=True)) get_next = self.getNext(dataset) batches = [] for _ in range(5): batch, = self.evaluate(get_next()) batches.append(batch) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) self.assertAllEqual(batches[0], [[1, 0], [1, 1]]) self.assertAllEqual(batches[1], [[1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0]]) self.assertAllEqual(batches[2], [[1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1]]) self.assertAllEqual(batches[3], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]) self.assertAllEqual(batches[4], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) @parameterized.named_parameters( ("WithoutPadding", True), ("WithPadding", False), ) def testTupleElements(self, param_no_padding): def build_dataset(sparse): def _generator(): text = [[1, 2, 3], [3, 4, 5, 6, 7], [1, 2], [8, 9, 0, 2, 3]] label = [1, 2, 1, 2] for x, y in zip(text, label): yield (_format_record(x, sparse), y) dataset = dataset_ops.Dataset.from_generator( generator=_generator, output_types=(_get_record_type(sparse), dtypes.int32), output_shapes=(_get_record_shape(sparse), tensor_shape.TensorShape([]))) if sparse: dataset = dataset.map(lambda x, y: (_to_sparse_tensor(x), y)) return dataset def _test_tuple_elements_by_padding(no_padding): dataset = build_dataset(sparse=no_padding) dataset = dataset.apply(grouping.bucket_by_sequence_length( element_length_func=_element_length_fn, bucket_batch_sizes=[2, 2, 2], bucket_boundaries=[0, 8], no_padding=no_padding)) shapes = dataset_ops.get_legacy_output_shapes(dataset) self.assertEqual([None, None], shapes[0].as_list()) self.assertEqual([None], shapes[1].as_list()) _test_tuple_elements_by_padding(param_no_padding) @parameterized.named_parameters( ("DoDropRemainder", True), ("DoNotDropRemainder", False), ) def testBucketSparse(self, param_drop_remainder): # pylint: disable=g-doc-args """Tests bucketing of sparse tensors (case where `no_padding` == True). Test runs on following dataset: [ [0], [0, 1], [0, 1, 2] ... [0, ..., max_len - 1] ] Sequences are bucketed by length and batched with `batch_size` < `bucket_size`. 
""" min_len = 0 max_len = 100 batch_size = 7 bucket_size = 10 def _build_dataset(): input_data = [range(i+1) for i in range(min_len, max_len)] def generator_fn(): for record in input_data: yield _format_record(record, sparse=True) dataset = dataset_ops.Dataset.from_generator( generator=generator_fn, output_types=_get_record_type(sparse=True)) dataset = dataset.map(_to_sparse_tensor) return dataset def _compute_expected_batches(drop_remainder): """Computes expected batch outputs and stores in a set.""" all_expected_sparse_tensors = set() for bucket_start_len in range(min_len, max_len, bucket_size): if drop_remainder: batch_offsets = [0] else: batch_offsets = range(0, bucket_size, batch_size) for batch_offset in batch_offsets: batch_start_len = bucket_start_len + batch_offset batch_end_len = min(batch_start_len + batch_size, bucket_start_len + bucket_size) expected_indices = [] expected_values = [] for length in range(batch_start_len, batch_end_len): for val in range(length + 1): expected_indices.append((length - batch_start_len, val)) expected_values.append(val) expected_sprs_tensor = (tuple(expected_indices), tuple(expected_values)) all_expected_sparse_tensors.add(expected_sprs_tensor) return all_expected_sparse_tensors def _compute_batches(dataset): """Computes actual batch outputs of dataset and stores in a set.""" batch = self.getNext(dataset) all_sparse_tensors = set() with self.assertRaises(errors.OutOfRangeError): while True: output = self.evaluate(batch()) sprs_tensor = (tuple([tuple(idx) for idx in output.indices]), tuple(output.values)) all_sparse_tensors.add(sprs_tensor) return all_sparse_tensors dataset = _build_dataset() boundaries = range(min_len + bucket_size + 1, max_len, bucket_size) dataset = dataset.apply( grouping.bucket_by_sequence_length( _element_length_fn, boundaries, [batch_size] * (len(boundaries) + 1), no_padding=True, drop_remainder=param_drop_remainder)) batches = _compute_batches(dataset) expected_batches = _compute_expected_batches(param_drop_remainder) self.assertEqual(batches, expected_batches) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
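A toy sketch of `tf.data.experimental.bucket_by_sequence_length` with made-up sequences, assuming eager execution; one boundary yields two buckets, each with its own batch size, and elements within a bucket are padded to a common length.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

sequences = [[1], [1, 2], [1, 2, 3, 4, 5], [1, 2, 3, 4, 5, 6]]
dataset = tf.data.Dataset.from_generator(
    lambda: iter(sequences), output_types=tf.int64, output_shapes=[None])

dataset = dataset.apply(
    tf.data.experimental.bucket_by_sequence_length(
        element_length_func=lambda x: tf.shape(x)[0],
        bucket_boundaries=[3],        # one boundary -> two buckets: len < 3, len >= 3
        bucket_batch_sizes=[2, 2]))   # one batch size per bucket

for batch in dataset:
  # Short sequences are batched and padded together; long ones likewise.
  print(batch.numpy())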
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.prefetch_to_device()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.experimental.ops import prefetching_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import structure from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.platform import test # TODO(b/117581999): add eager coverage when supported. class PrefetchToDeviceTest(test_base.DatasetTestBase): @test_util.deprecated_graph_mode_only def testPrefetchToDevice(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue(structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testPrefetchToSameDevice(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device( "/job:localhost/replica:0/task:0/device:CPU:0")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue(structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testPrefetchDictToDevice(self): host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x}) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue(structure.are_compatible( 
dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element["a"].dtype) self.assertEqual([], next_element["a"].shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual({"a": i}, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testPrefetchSparseTensorsToDevice(self): def make_tensor(i): return sparse_tensor.SparseTensorValue( indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2]) host_dataset = dataset_ops.Dataset.range(10).map(make_tensor) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue(structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): actual = self.evaluate(next_element) self.assertAllEqual([i], actual.values) self.assertAllEqual([[0, 0]], actual.indices) self.assertAllEqual([2, 2], actual.dense_shape) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testPrefetchToDeviceGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/gpu:0")) self.assertDatasetProduces(device_dataset, list(range(10))) # @test_util.deprecated_graph_mode_only # def testPrefetchToDeviceCorrectPlacement(self): # # if not test_util.is_gpu_available(): # self.skipTest("No GPU available") # # dataset = dataset_ops.Dataset.range(10) # dataset = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0")) # # self.assertIn("gpu:0", dataset._variant_tensor.device.lower()) @test_util.deprecated_graph_mode_only def testPrefetchToDeviceWithReInit(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue(structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testPrefetchToDeviceGpuWithReInit(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.prefetch_to_device("/gpu:0")) iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( 
config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) # @test_util.deprecated_graph_mode_only # def testIteratorOnDeviceEagerMode(self): # if not test_util.is_gpu_available(): # self.skipTest("No GPU available") # # dataset = dataset_ops.Dataset.range(10) # dataset = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0")) # iterator = iter(dataset) # data = next(iterator) # # self.assertIn("gpu:0", dataset._variant_tensor.device.lower()) # self.assertIn("gpu:0", iterator._iterator_resource.device.lower()) # self.assertIn("gpu:0", data.device.lower()) # # @test_util.deprecated_graph_mode_only # def testIteratorOnDeviceGraphModeOneShotIterator(self): # if not test_util.is_gpu_available(): # self.skipTest("No GPU available") # # dataset = dataset_ops.Dataset.range(10) # dataset = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0")) # iterator = dataset_ops.make_one_shot_iterator(dataset) # data = iterator.get_next() # # self.assertIn("gpu:0", dataset._variant_tensor.device.lower()) # self.assertIn("gpu:0", iterator._iterator_resource.device.lower()) # self.assertIn("gpu:0", data.device.lower()) # # @test_util.deprecated_graph_mode_only # def testIteratorOnDeviceGraphModeInitializableIterator(self): # if not test_util.is_gpu_available(): # self.skipTest("No GPU available") # # dataset = dataset_ops.Dataset.range(10) # dataset = dataset.apply(prefetching_ops.prefetch_to_device("/gpu:0")) # iterator = dataset_ops.make_initializable_iterator(dataset) # data = iterator.get_next() # # self.assertIn("gpu:0", dataset._variant_tensor.device.lower()) # self.assertIn("gpu:0", iterator._iterator_resource.device.lower()) # self.assertIn("gpu:0", data.device.lower()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py
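A graph-mode sketch of `tf.data.experimental.prefetch_to_device` along the lines of the CPU tests above (the tests note eager coverage is not yet supported); the device string is illustrative.

import tensorflow as tf

dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(tf.data.experimental.prefetch_to_device("/cpu:0"))

# The iterator is created on the target device, mirroring the tests above.
with tf.device("/cpu:0"):
  iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
  next_element = iterator.get_next()

with tf.compat.v1.Session() as sess:
  print([sess.run(next_element) for _ in range(10)])  # [0, 1, ..., 9]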
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.ignore_errors()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np from tensorflow.python.data.experimental.ops import error_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.lib.io import python_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import io_ops from tensorflow.python.platform import test from tensorflow.python.util import compat _NUMPY_RANDOM_SEED = 42 @test_util.run_all_in_graph_and_eager_modes class IgnoreErrorsTest(test_base.DatasetTestBase): def testMapIgnoreError(self): components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32) dataset = ( dataset_ops.Dataset.from_tensor_slices(components) .map(lambda x: array_ops.check_numerics(x, "message")).apply( error_ops.ignore_errors())) get_next = self.getNext(dataset) for x in [1., 2., 3., 5.]: self.assertEqual(x, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testParallelMapIgnoreError(self): components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32) dataset = ( dataset_ops.Dataset.from_tensor_slices(components).map( lambda x: array_ops.check_numerics(x, "message"), num_parallel_calls=2).prefetch(2).apply(error_ops.ignore_errors())) get_next = self.getNext(dataset) for x in [1., 2., 3., 5.]: self.assertEqual(x, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testReadFileIgnoreError(self): def write_string_to_file(value, filename): with open(filename, "w") as f: f.write(value) filenames = [ os.path.join(self.get_temp_dir(), "file_%d.txt" % i) for i in range(5) ] for filename in filenames: write_string_to_file(filename, filename) dataset = ( dataset_ops.Dataset.from_tensor_slices(filenames).map( io_ops.read_file, num_parallel_calls=2).prefetch(2).apply(error_ops.ignore_errors())) get_next = self.getNext(dataset) # All of the files are present. for filename in filenames: self.assertEqual(compat.as_bytes(filename), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Delete one of the files. os.remove(filenames[0]) # Attempting to read filenames[0] will fail, but ignore_errors() # will catch the error. 
    get_next = self.getNext(dataset)
    for filename in filenames[1:]:
      self.assertEqual(compat.as_bytes(filename), self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  def testTFRecordDatasetIgnoreError(self):
    filenames = []
    for i in range(5):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for j in range(10):
        writer.write(b"record")
      writer.close()
      # Append corrupted data
      with open(fn, "a") as f:
        f.write("corrupted data")

    dataset = readers.TFRecordDataset(filenames).apply(
        error_ops.ignore_errors())
    get_next = self.getNext(dataset)

    # All of the files are present.
    for filename in filenames:
      for j in range(10):
        self.assertEqual(b"record", self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/ignore_errors_test.py
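A short sketch of `tf.data.experimental.ignore_errors` in the spirit of `testMapIgnoreError` above, assuming eager execution; the NaN element is the one the transformation silently drops.

import numpy as np
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

values = np.array([1., 2., np.nan, 4.], dtype=np.float32)
dataset = (tf.data.Dataset.from_tensor_slices(values)
           # check_numerics raises on the NaN element...
           .map(lambda x: tf.debugging.check_numerics(x, "bad value"))
           # ...and ignore_errors swallows that failure and moves on.
           .apply(tf.data.experimental.ignore_errors()))
print([float(x) for x in dataset])  # [1.0, 2.0, 4.0]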
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.copy_to_device()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import config_pb2 from tensorflow.python.compat import compat from tensorflow.python.data.experimental.ops import prefetching_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.util import structure from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test from tensorflow.python.util import compat as util_compat # TODO(b/117581999): add eager coverage when supported. class CopyToDeviceTest(test_base.DatasetTestBase): @test_util.deprecated_graph_mode_only def testCopyToDevice(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceInt32(self): host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int32, next_element.dtype) self.assertEqual((4,), next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToSameDevice(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:0")) with ops.device("/cpu:0"): iterator = 
dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceWithPrefetch(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")).prefetch(1) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyDictToDevice(self): host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x}) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element["a"].dtype) self.assertEqual([], next_element["a"].shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual({"a": i}, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyDictToDeviceWithPrefetch(self): host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x}) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")).prefetch(1) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element["a"].dtype) self.assertEqual([], next_element["a"].shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): self.assertEqual({"a": i}, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopySparseTensorsToDevice(self): def make_tensor(i): return sparse_tensor.SparseTensorValue( indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2]) host_dataset = dataset_ops.Dataset.range(10).map(make_tensor) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( 
structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): actual = self.evaluate(next_element) self.assertAllEqual([i], actual.values) self.assertAllEqual([[0, 0]], actual.indices) self.assertAllEqual([2, 2], actual.dense_shape) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopySparseTensorsToDeviceWithPrefetch(self): def make_tensor(i): return sparse_tensor.SparseTensorValue( indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2]) host_dataset = dataset_ops.Dataset.range(10).map(make_tensor) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")).prefetch(1) with ops.device("/cpu:1"): iterator = dataset_ops.make_one_shot_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): for i in range(10): actual = self.evaluate(next_element) self.assertAllEqual([i], actual.values) self.assertAllEqual([[0, 0]], actual.indices) self.assertAllEqual([2, 2], actual.dense_shape) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuWithPrefetch(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")).prefetch(1) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuWithMap(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") def generator(): for i in range(10): yield i, float(i), str(i) host_dataset = dataset_ops.Dataset.from_generator( generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string)) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) def gpu_map_func(x, y, z): return math_ops.square(x), math_ops.square(y), z device_dataset = device_dataset.apply( prefetching_ops.map_on_gpu(gpu_map_func)) options = dataset_ops.Options() 
options.experimental_optimization.autotune = False device_dataset = device_dataset.with_options(options) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(10): x, y, z = self.evaluate(next_element) self.assertEqual(i**2, x) self.assertEqual(float(i**2), y) self.assertEqual(util_compat.as_bytes(str(i)), z) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuInt32(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuInt32AndPrefetch(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")).prefetch(1) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuStrings(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"]) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuStringsAndPrefetch(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"]) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDevicePingPongCPUGPU(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with 
compat.forward_compatibility_horizon(2018, 8, 4): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0")) back_to_cpu_dataset = device_dataset.apply( prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0")) with ops.device("/cpu:0"): iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceWithReInit(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")) with ops.device("/cpu:1"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceWithReInitAndPrefetch(self): host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/cpu:1")).prefetch(1) with ops.device("/cpu:1"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() self.assertTrue( structure.are_compatible( dataset_ops.get_structure(host_dataset), dataset_ops.get_structure(device_dataset))) self.assertEqual(dtypes.int64, next_element.dtype) self.assertEqual([], next_element.shape) worker_config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=worker_config): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuWithReInit(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testCopyToDeviceGpuWithReInitAndPrefetch(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") 
host_dataset = dataset_ops.Dataset.range(10) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")).prefetch(1) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_element = iterator.get_next() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): self.evaluate(iterator.initializer) for i in range(5): self.assertEqual(i, self.evaluate(next_element)) self.evaluate(iterator.initializer) for i in range(10): self.assertEqual(i, self.evaluate(next_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element) @test_util.deprecated_graph_mode_only def testIteratorGetNextAsOptionalOnGPU(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") host_dataset = dataset_ops.Dataset.range(3) device_dataset = host_dataset.apply( prefetching_ops.copy_to_device("/gpu:0")) with ops.device("/gpu:0"): iterator = dataset_ops.make_initializable_iterator(device_dataset) next_elem = iterator_ops.get_next_as_optional(iterator) elem_has_value_t = next_elem.has_value() elem_value_t = next_elem.get_value() with self.cached_session( config=config_pb2.ConfigProto(allow_soft_placement=False)): # Before initializing the iterator, evaluating the optional fails with # a FailedPreconditionError. with self.assertRaises(errors.FailedPreconditionError): self.evaluate(elem_has_value_t) with self.assertRaises(errors.FailedPreconditionError): self.evaluate(elem_value_t) # For each element of the dataset, assert that the optional evaluates to # the expected value. self.evaluate(iterator.initializer) for i in range(3): elem_has_value, elem_value = self.evaluate( [elem_has_value_t, elem_value_t]) self.assertTrue(elem_has_value) self.assertEqual(i, elem_value) # After exhausting the iterator, `next_elem.has_value()` will evaluate to # false, and attempting to get the value will fail. for _ in range(2): self.assertFalse(self.evaluate(elem_has_value_t)) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_value_t) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py
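The tests above exercise `tf.data.experimental.copy_to_device`. For reference, a minimal usage sketch in TF 1.15 graph mode might look like the following; it is not code from the repository, and the "/gpu:0" device string is an assumption that can be replaced by any visible device.

import tensorflow.compat.v1 as tf

# Build a host-side dataset, copy its elements to the target device, and
# prefetch one element ahead of the consumer.
dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(
    tf.data.experimental.copy_to_device("/gpu:0")).prefetch(1)

with tf.device("/gpu:0"):
  iterator = tf.data.make_initializable_iterator(dataset)
  next_element = iterator.get_next()

with tf.Session() as sess:
  sess.run(iterator.initializer)
  try:
    while True:
      print(sess.run(next_element))
  except tf.errors.OutOfRangeError:
    pass  # Dataset exhausted.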
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.make_batched_features_dataset()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import io_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MakeBatchedFeaturesDatasetTest( reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase): def testRead(self): for batch_size in [1, 2]: for num_epochs in [1, 10]: # Basic test: read from file 0. self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames[0], label_key="label", num_epochs=num_epochs, batch_size=batch_size)) self.verify_records( batch_size, 0, num_epochs=num_epochs, label_key_provided=True) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch(label_key_provided=True) # Basic test: read from file 1. self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames[1], label_key="label", num_epochs=num_epochs, batch_size=batch_size)) self.verify_records( batch_size, 1, num_epochs=num_epochs, label_key_provided=True) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch(label_key_provided=True) # Basic test: read from both files. self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames, label_key="label", num_epochs=num_epochs, batch_size=batch_size)) self.verify_records( batch_size, num_epochs=num_epochs, label_key_provided=True) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch(label_key_provided=True) # Basic test: read from both files. 
self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames, num_epochs=num_epochs, batch_size=batch_size)) self.verify_records(batch_size, num_epochs=num_epochs) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch() def testReadWithEquivalentDataset(self): features = { "file": parsing_ops.FixedLenFeature([], dtypes.int64), "record": parsing_ops.FixedLenFeature([], dtypes.int64), } dataset = ( core_readers.TFRecordDataset(self.test_filenames) .map(lambda x: parsing_ops.parse_single_example(x, features)) .repeat(10).batch(2)) next_element = self.getNext(dataset) for file_batch, _, _, _, record_batch, _ in self._next_expected_batch( range(self._num_files), 2, 10): actual_batch = self.evaluate(next_element()) self.assertAllEqual(file_batch, actual_batch["file"]) self.assertAllEqual(record_batch, actual_batch["record"]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testReadWithFusedShuffleRepeatDataset(self): num_epochs = 5 total_records = num_epochs * self._num_records for batch_size in [1, 2]: # Test that shuffling with same seed produces the same result. outputs1 = self.getNext( self.make_batch_feature( filenames=self.test_filenames[0], num_epochs=num_epochs, batch_size=batch_size, shuffle=True, shuffle_seed=5)) outputs2 = self.getNext( self.make_batch_feature( filenames=self.test_filenames[0], num_epochs=num_epochs, batch_size=batch_size, shuffle=True, shuffle_seed=5)) for _ in range(total_records // batch_size): batch1 = self._run_actual_batch(outputs1) batch2 = self._run_actual_batch(outputs2) for i in range(len(batch1)): self.assertAllEqual(batch1[i], batch2[i]) # Test that shuffling with different seeds produces a different order. outputs1 = self.getNext( self.make_batch_feature( filenames=self.test_filenames[0], num_epochs=num_epochs, batch_size=batch_size, shuffle=True, shuffle_seed=5)) outputs2 = self.getNext( self.make_batch_feature( filenames=self.test_filenames[0], num_epochs=num_epochs, batch_size=batch_size, shuffle=True, shuffle_seed=15)) all_equal = True for _ in range(total_records // batch_size): batch1 = self._run_actual_batch(outputs1) batch2 = self._run_actual_batch(outputs2) for i in range(len(batch1)): all_equal = all_equal and np.array_equal(batch1[i], batch2[i]) self.assertFalse(all_equal) def testParallelReadersAndParsers(self): num_epochs = 5 for batch_size in [1, 2]: for reader_num_threads in [2, 4]: for parser_num_threads in [2, 4]: self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames, label_key="label", num_epochs=num_epochs, batch_size=batch_size, reader_num_threads=reader_num_threads, parser_num_threads=parser_num_threads)) self.verify_records( batch_size, num_epochs=num_epochs, label_key_provided=True, interleave_cycle_length=reader_num_threads) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch(label_key_provided=True) self.outputs = self.getNext( self.make_batch_feature( filenames=self.test_filenames, num_epochs=num_epochs, batch_size=batch_size, reader_num_threads=reader_num_threads, parser_num_threads=parser_num_threads)) self.verify_records( batch_size, num_epochs=num_epochs, interleave_cycle_length=reader_num_threads) with self.assertRaises(errors.OutOfRangeError): self._next_actual_batch() def testDropFinalBatch(self): for batch_size in [1, 2]: for num_epochs in [1, 10]: with ops.Graph().as_default(): # Basic test: read from file 0. 
outputs = self.make_batch_feature( filenames=self.test_filenames[0], label_key="label", num_epochs=num_epochs, batch_size=batch_size, drop_final_batch=True) for tensor in nest.flatten(outputs): if isinstance(tensor, ops.Tensor): # Guard against SparseTensor. self.assertEqual(tensor.shape[0], batch_size) def testIndefiniteRepeatShapeInference(self): dataset = self.make_batch_feature( filenames=self.test_filenames[0], label_key="label", num_epochs=None, batch_size=32) for shape, clazz in zip( nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)), nest.flatten(dataset_ops.get_legacy_output_classes(dataset))): if issubclass(clazz, ops.Tensor): self.assertEqual(32, shape[0]) def testOldStyleReader(self): with self.assertRaisesRegexp( TypeError, r"The `reader` argument must return a `Dataset` object. " r"`tf.ReaderBase` subclasses are not supported."): _ = readers.make_batched_features_dataset( file_pattern=self.test_filenames[0], batch_size=32, features={ "file": parsing_ops.FixedLenFeature([], dtypes.int64), "record": parsing_ops.FixedLenFeature([], dtypes.int64), "keywords": parsing_ops.VarLenFeature(dtypes.string), "label": parsing_ops.FixedLenFeature([], dtypes.string), }, reader=io_ops.TFRecordReader) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py
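For context, this is a small, hypothetical sketch of how `tf.data.experimental.make_batched_features_dataset` is typically called outside the test harness; the file pattern and feature spec below are placeholders rather than values from the repository.

import tensorflow.compat.v1 as tf

# Placeholder feature spec mirroring the columns the tests parse.
features = {
    "file": tf.io.FixedLenFeature([], tf.int64),
    "record": tf.io.FixedLenFeature([], tf.int64),
    "keywords": tf.io.VarLenFeature(tf.string),
    "label": tf.io.FixedLenFeature([], tf.string),
}

dataset = tf.data.experimental.make_batched_features_dataset(
    file_pattern="/tmp/data/*.tfrecord",  # placeholder TFRecord files
    batch_size=32,
    features=features,
    label_key="label",
    num_epochs=1,
    shuffle=True,
    shuffle_seed=5)
# Each element is a (feature_dict, label_batch) pair; dense features come back
# as [batch_size, ...] tensors and VarLenFeature columns as SparseTensors.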
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `_AutoShardDataset` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.experimental.ops import interleave_ops from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.experimental.ops import unique from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.framework import combinations from tensorflow.python.framework import errors from tensorflow.python.ops import string_ops from tensorflow.python.platform import test def chunk(l, n): for i in range(0, len(l), n): yield l[i:i + n] class AutoShardDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase, parameterized.TestCase): def setUp(self): super(AutoShardDatasetTest, self).setUp() self._num_files = 10 self._num_records = 10 self.test_filenames = self._createFiles() def assertDatasetProducesWithShuffle(self, dataset, expected, batch, num_examples, shuffle): if shuffle: actual = [] next_fn = self.getNext(dataset) for _ in range(num_examples): elem = self.evaluate(next_fn()) if isinstance(elem, tuple): actual.extend(elem) else: actual.extend(elem.tolist()) self.assertCountEqual(actual, expected) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_fn()) else: self.assertDatasetProduces(dataset, list(chunk(expected, batch))) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(shuffle=[True, False]))) def testFlatMapReaderPipeline(self, shuffle): dataset = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=shuffle) dataset = dataset.flat_map(core_readers.TFRecordDataset) dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in (3, 8) for r in range(0, 10) ] self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle) @combinations.generate(test_base.default_test_combinations()) def testZipReaderPipeline(self): dataset1 = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=False) dataset1 = dataset1.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset2 = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=False) dataset2 = dataset2.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ 
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension for r in range(0, 10) for f in (3, 8) ] self.assertDatasetProduces(dataset, expected) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(shuffle=[True, False]))) def testConcatenateReaderPipeline(self, shuffle): dataset1 = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=shuffle) dataset1 = dataset1.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset1 = dataset1.batch(5) dataset2 = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=shuffle) dataset2 = dataset2.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset2 = dataset2.batch(5) dataset = dataset1.concatenate(dataset2) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for r in range(0, 10) for f in (3, 8) ] expected += expected self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(shuffle=[True, False]))) def testPipelineWithMap(self, shuffle): dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False) dataset = dataset.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000)) dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for r in range(0, 10) for f in (3, 8) ] self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle) @combinations.generate(test_base.default_test_combinations()) def testDirectFilenameTFRecordReaderPipeline(self): dataset = core_readers.TFRecordDataset(self.test_filenames) dataset = distribute._AutoShardDataset(dataset, 5, 0) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in (0, 5) for r in range(0, 10) ] self.assertDatasetProduces(dataset, expected) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(shuffle=[True, False]))) def testValidPipelineWithRangeDataset(self, shuffle): dataset = dataset_ops.Dataset.range(self._num_files) dataset = dataset.map(lambda n: string_ops.string_join( # pylint:disable=g-long-lambda [self.get_temp_dir(), string_ops.string_format("/tf_record.{}.txt", [n])])) dataset = dataset.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000)) dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for r in range(0, 10) for f in (3, 8) ] self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(params=[(1, 0, 10, 10), (2, 1, 20, 5), (10, 1, 1, 10)]))) def testStandardReaderPipeline(self, params): num_epochs, index, batch_size, parallel_reads = params dataset = readers.make_tf_record_dataset( file_pattern=self.test_filenames, num_epochs=num_epochs, batch_size=batch_size, parser_fn=None, num_parallel_reads=parallel_reads, drop_final_batch=True, shuffle=False) dataset = distribute._AutoShardDataset(dataset, 2, 
index) outputs = self.getNext(dataset) self._verify_records( outputs, batch_size=batch_size, file_index=[i for i in range(index, self._num_records, 2)], num_epochs=num_epochs, interleave_cycle_length=parallel_reads, drop_final_batch=True, use_parser_fn=None) with self.assertRaises(errors.OutOfRangeError): self.evaluate(outputs()) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(shuffle=[True, False]))) def testSampleResNetPipeline(self, shuffle): dataset = dataset_ops.Dataset.list_files( self.test_filenames, shuffle=shuffle) dataset = dataset.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 5, 3) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for r in range(0, 10) for f in (3, 8) ] self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle) @combinations.generate(test_base.default_test_combinations()) def testWorkersGreaterThanNumFiles(self): dataset = dataset_ops.Dataset.list_files(self.test_filenames) dataset = dataset.apply( interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10)) dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 500, 499) self.assertDatasetProduces(dataset, []) @combinations.generate(test_base.default_test_combinations()) def testTFRecordReaderWithDirectFileNames(self): # Using `_TFRecordDataset` creates a raw op rather than wrapping it around # a flat_map automatically. dataset = core_readers._TFRecordDataset(self.test_filenames) dataset = distribute._AutoShardDataset(dataset, 5, 0) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in (0, 5) ] self.assertDatasetProduces(dataset, expected) @combinations.generate(test_base.default_test_combinations()) def testTFRecordReaderWithDirectFileNamesAndShapes(self): # Using `_TFRecordDataset` creates a raw op rather than wrapping it around # a flat_map automatically. 
dataset = core_readers._TFRecordDataset(self.test_filenames) # BatchDataset contains `output_types` and `output_shapes` dataset = dataset.batch(5) dataset = distribute._AutoShardDataset(dataset, 2, 0) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 5) ] self.assertDatasetProduces(dataset, list(chunk(expected, 5))) @combinations.generate(test_base.default_test_combinations()) def testShardOutOfRange(self): dataset = dataset_ops.Dataset.range(5) with self.assertRaises(errors.InvalidArgumentError): dataset = distribute._AutoShardDataset(dataset, 10, 0) self.evaluate(self.getNext(dataset)()) @combinations.generate(test_base.default_test_combinations()) def testShardOutOfRangeEmptyDataset(self): dataset = dataset_ops.Dataset.range(0) with self.assertRaises(errors.OutOfRangeError): dataset = distribute._AutoShardDataset(dataset, 10, 0) self.evaluate(self.getNext(dataset)()) @combinations.generate(test_base.default_test_combinations()) def testNoReaderPipelines(self): dataset = dataset_ops.Dataset.range(1024) dataset = distribute._AutoShardDataset(dataset, 2, 0) self.assertDatasetProduces(dataset, [i for i in range(1024) if i % 2 == 0]) @combinations.generate(test_base.default_test_combinations()) def testUnknownOpInPipelineStillShardsAtTheEnd(self): dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False) dataset = dataset.flat_map(core_readers.TFRecordDataset) dataset = dataset.apply(unique.unique()) dataset = distribute._AutoShardDataset(dataset, 5, 0) expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in (0, 5) ] self.assertDatasetProduces(dataset, expected) @combinations.generate(test_base.default_test_combinations()) def testInvalidWorkerIndex(self): dataset = dataset_ops.Dataset.list_files(self.test_filenames) dataset = dataset.flat_map(core_readers.TFRecordDataset) dataset = dataset.batch(5) with self.assertRaises(errors.InvalidArgumentError): dataset = distribute._AutoShardDataset(dataset, 2, 2) self.evaluate(self.getNext(dataset)()) class AutoShardTextLineDatasetTest( reader_dataset_ops_test_base.TextLineDatasetTestBase, parameterized.TestCase): def setUp(self): super(AutoShardTextLineDatasetTest, self).setUp() self._num_files = 10 self._num_records = 10 self.test_filenames = self._createFiles(self._num_files, self._num_records) @combinations.generate(test_base.default_test_combinations()) def testDirectFilenameTextLineReaderPipeline(self): dataset = core_readers.TextLineDataset(self.test_filenames) dataset = distribute._AutoShardDataset(dataset, 5, 0) expected = [ b"%d: %d" % (f, r) # pylint:disable=g-complex-comprehension for f in (0, 5) for r in range(0, 10) ] self.assertDatasetProduces(dataset, expected) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py
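The `_AutoShardDataset` transformation tested above is private. A rough user-level approximation of the same per-worker splitting can be written with the public `Dataset.shard`, as in this sketch; the worker count, shard index, and file pattern are illustrative only.

import tensorflow.compat.v1 as tf

num_workers = 5    # illustrative shard count
worker_index = 3   # illustrative shard index

# Shard at the file level, then read and batch records, roughly mirroring the
# reader pipelines exercised by the tests above.
filenames = tf.data.Dataset.list_files(
    "/tmp/data/tf_record.*.txt", shuffle=False)
filenames = filenames.shard(num_workers, worker_index)
dataset = filenames.flat_map(tf.data.TFRecordDataset)
dataset = dataset.batch(5)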
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.CsvDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import zlib from tensorflow.python.data.experimental.ops import error_ops from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class CsvDatasetTest(test_base.DatasetTestBase): def _setup_files(self, inputs, linebreak='\n', compression_type=None): filenames = [] for i, ip in enumerate(inputs): fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i) contents = linebreak.join(ip).encode('utf-8') if compression_type is None: with open(fn, 'wb') as f: f.write(contents) elif compression_type == 'GZIP': with gzip.GzipFile(fn, 'wb') as f: f.write(contents) elif compression_type == 'ZLIB': contents = zlib.compress(contents) with open(fn, 'wb') as f: f.write(contents) else: raise ValueError('Unsupported compression_type', compression_type) filenames.append(fn) return filenames def _make_test_datasets(self, inputs, **kwargs): # Test by comparing its output to what we could get with map->decode_csv filenames = self._setup_files(inputs) dataset_expected = core_readers.TextLineDataset(filenames) dataset_expected = dataset_expected.map( lambda l: parsing_ops.decode_csv(l, **kwargs)) dataset_actual = readers.CsvDataset(filenames, **kwargs) return (dataset_actual, dataset_expected) def _test_by_comparison(self, inputs, **kwargs): """Checks that CsvDataset is equiv to TextLineDataset->map(decode_csv).""" dataset_actual, dataset_expected = self._make_test_datasets( inputs, **kwargs) self.assertDatasetsEqual(dataset_actual, dataset_expected) def _verify_output_or_err(self, dataset, expected_output=None, expected_err_re=None): if expected_err_re is None: # Verify that output is expected, without errors nxt = self.getNext(dataset) expected_output = [[ v.encode('utf-8') if isinstance(v, str) else v for v in op ] for op in expected_output] for value in expected_output: op = self.evaluate(nxt()) self.assertAllEqual(op, value) with self.assertRaises(errors.OutOfRangeError): self.evaluate(nxt()) else: nxt = self.getNext(dataset) while True: try: self.evaluate(nxt()) except errors.OutOfRangeError: break def _test_dataset( self, inputs, expected_output=None, expected_err_re=None, linebreak='\n', compression_type=None, # Used for both setup and parsing **kwargs): """Checks 
that elements produced by CsvDataset match expected output.""" # Convert str type because py3 tf strings are bytestrings filenames = self._setup_files(inputs, linebreak, compression_type) kwargs['compression_type'] = compression_type if expected_err_re is not None: # Verify that OpError is produced as expected with self.assertRaisesOpError(expected_err_re): dataset = readers.CsvDataset(filenames, **kwargs) self._verify_output_or_err(dataset, expected_output, expected_err_re) else: dataset = readers.CsvDataset(filenames, **kwargs) self._verify_output_or_err(dataset, expected_output, expected_err_re) def testCsvDataset_requiredFields(self): record_defaults = [[]] * 4 inputs = [['1,2,3,4']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_int(self): record_defaults = [[0]] * 4 inputs = [['1,2,3,4', '5,6,7,8']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_float(self): record_defaults = [[0.0]] * 4 inputs = [['1.0,2.1,3.2,4.3', '5.4,6.5,7.6,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_string(self): record_defaults = [['']] * 4 inputs = [['1.0,2.1,hello,4.3', '5.4,6.5,goodbye,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withEmptyFields(self): record_defaults = [[0]] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) def testCsvDataset_errWithUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4']] self._test_dataset( inputs, expected_err_re='Unquoted fields cannot have quotes inside', record_defaults=record_defaults) def testCsvDataset_errWithUnescapedQuotes(self): record_defaults = [['']] * 3 inputs = [['"a"b","c","d"']] self._test_dataset( inputs, expected_err_re= 'Quote inside a string has to be escaped by another quote', record_defaults=record_defaults) def testCsvDataset_ignoreErrWithUnescapedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']] filenames = self._setup_files(inputs) dataset = readers.CsvDataset(filenames, record_defaults=record_defaults) dataset = dataset.apply(error_ops.ignore_errors()) self._verify_output_or_err(dataset, [['e', 'f', 'g']]) def testCsvDataset_ignoreErrWithUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4', 'a,b,c"d', '9,8"7,6,5', 'e,f,g']] filenames = self._setup_files(inputs) dataset = readers.CsvDataset(filenames, record_defaults=record_defaults) dataset = dataset.apply(error_ops.ignore_errors()) self._verify_output_or_err(dataset, [['e', 'f', 'g']]) def testCsvDataset_withNoQuoteDelimAndUnquotedQuotes(self): record_defaults = [['']] * 3 inputs = [['1,2"3,4']] self._test_by_comparison( inputs, record_defaults=record_defaults, use_quote_delim=False) def testCsvDataset_mixedTypes(self): record_defaults = [ constant_op.constant([], dtype=dtypes.int32), constant_op.constant([], dtype=dtypes.float32), constant_op.constant([], dtype=dtypes.string), constant_op.constant([], dtype=dtypes.float64) ] inputs = [['1,2.1,3.2,4.3', '5,6.5,7.6,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withUseQuoteDelimFalse(self): record_defaults = [['']] * 4 inputs = [['1,2,"3,4"', '"5,6",7,8']] self._test_by_comparison( inputs, record_defaults=record_defaults, use_quote_delim=False) def testCsvDataset_withFieldDelim(self): record_defaults = [[0]] * 4 inputs = [['1:2:3:4', 
'5:6:7:8']] self._test_by_comparison( inputs, record_defaults=record_defaults, field_delim=':') def testCsvDataset_withNaValue(self): record_defaults = [[0]] * 4 inputs = [['1,NA,3,4', 'NA,6,7,8']] self._test_by_comparison( inputs, record_defaults=record_defaults, na_value='NA') def testCsvDataset_withSelectCols(self): record_defaults = [['']] * 2 inputs = [['1,2,3,4', '"5","6","7","8"']] self._test_by_comparison( inputs, record_defaults=record_defaults, select_cols=[1, 2]) def testCsvDataset_withSelectColsTooHigh(self): record_defaults = [[0]] * 2 inputs = [['1,2,3,4', '5,6,7,8']] self._test_dataset( inputs, expected_err_re='Expect 2 fields but have 1 in record', record_defaults=record_defaults, select_cols=[3, 4]) def testCsvDataset_withOneCol(self): record_defaults = [['NA']] inputs = [['0', '', '2']] self._test_dataset( inputs, [['0'], ['NA'], ['2']], record_defaults=record_defaults) def testCsvDataset_withMultipleFiles(self): record_defaults = [[0]] * 4 inputs = [['1,2,3,4', '5,6,7,8'], ['5,6,7,8']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withLeadingAndTrailingSpaces(self): record_defaults = [[0.0]] * 4 inputs = [['0, 1, 2, 3']] expected = [[0.0, 1.0, 2.0, 3.0]] self._test_dataset(inputs, expected, record_defaults=record_defaults) def testCsvDataset_errorWithMissingDefault(self): record_defaults = [[]] * 2 inputs = [['0,']] self._test_dataset( inputs, expected_err_re='Field 1 is required but missing in record!', record_defaults=record_defaults) def testCsvDataset_errorWithFewerDefaultsThanFields(self): record_defaults = [[0.0]] * 2 inputs = [['0,1,2,3']] self._test_dataset( inputs, expected_err_re='Expect 2 fields but have more in record', record_defaults=record_defaults) def testCsvDataset_errorWithMoreDefaultsThanFields(self): record_defaults = [[0.0]] * 5 inputs = [['0,1,2,3']] self._test_dataset( inputs, expected_err_re='Expect 5 fields but have 4 in record', record_defaults=record_defaults) def testCsvDataset_withHeader(self): record_defaults = [[0]] * 2 inputs = [['col1,col2', '1,2']] expected = [[1, 2]] self._test_dataset( inputs, expected, record_defaults=record_defaults, header=True, ) def testCsvDataset_withHeaderAndNoRecords(self): record_defaults = [[0]] * 2 inputs = [['col1,col2']] expected = [] self._test_dataset( inputs, expected, record_defaults=record_defaults, header=True, ) def testCsvDataset_errorWithHeaderEmptyFile(self): record_defaults = [[0]] * 2 inputs = [[]] expected_err_re = "Can't read header of file" self._test_dataset( inputs, expected_err_re=expected_err_re, record_defaults=record_defaults, header=True, ) def testCsvDataset_withEmptyFile(self): record_defaults = [['']] * 2 inputs = [['']] # Empty file self._test_dataset( inputs, expected_output=[], record_defaults=record_defaults) def testCsvDataset_errorWithEmptyRecord(self): record_defaults = [['']] * 2 inputs = [['', '1,2']] # First record is empty self._test_dataset( inputs, expected_err_re='Expect 2 fields but have 1 in record', record_defaults=record_defaults) def testCsvDataset_withChainedOps(self): # Testing that one dataset can create multiple iterators fine. # `repeat` creates multiple iterators from the same C++ Dataset. 
record_defaults = [[0]] * 4 inputs = [['1,,3,4', '5,6,,8']] ds_actual, ds_expected = self._make_test_datasets( inputs, record_defaults=record_defaults) self.assertDatasetsEqual( ds_actual.repeat(5).prefetch(1), ds_expected.repeat(5).prefetch(1)) def testCsvDataset_withTypeDefaults(self): # Testing using dtypes as record_defaults for required fields record_defaults = [dtypes.float32, [0.0]] inputs = [['1.0,2.0', '3.0,4.0']] self._test_dataset( inputs, [[1.0, 2.0], [3.0, 4.0]], record_defaults=record_defaults, ) def testMakeCsvDataset_fieldOrder(self): data = [[ '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19', '1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19' ]] file_path = self._setup_files(data) ds = readers.make_csv_dataset( file_path, batch_size=1, shuffle=False, num_epochs=1) nxt = self.getNext(ds) result = list(self.evaluate(nxt()).values()) self.assertEqual(result, sorted(result)) ## The following tests exercise parsing logic for quoted fields def testCsvDataset_withQuoted(self): record_defaults = [['']] * 4 inputs = [['"a","b","c :)","d"', '"e","f","g :(","h"']] self._test_by_comparison(inputs, record_defaults=record_defaults) def testCsvDataset_withOneColAndQuotes(self): record_defaults = [['']] inputs = [['"0"', '"1"', '"2"']] self._test_dataset( inputs, [['0'], ['1'], ['2']], record_defaults=record_defaults) def testCsvDataset_withNewLine(self): # In this case, we expect it to behave differently from # TextLineDataset->map(decode_csv) since that flow has bugs record_defaults = [['']] * 4 inputs = [['a,b,"""c""\n0","d\ne"', 'f,g,h,i']] expected = [['a', 'b', '"c"\n0', 'd\ne'], ['f', 'g', 'h', 'i']] self._test_dataset(inputs, expected, record_defaults=record_defaults) def testCsvDataset_withNewLineInUnselectedCol(self): record_defaults = [['']] inputs = [['1,"2\n3",4', '5,6,7']] self._test_dataset( inputs, expected_output=[['1'], ['5']], record_defaults=record_defaults, select_cols=[0]) def testCsvDataset_withMultipleNewLines(self): # In this case, we expect it to behave differently from # TextLineDataset->map(decode_csv) since that flow has bugs record_defaults = [['']] * 4 inputs = [['a,"b\n\nx","""c""\n \n0","d\ne"', 'f,g,h,i']] expected = [['a', 'b\n\nx', '"c"\n \n0', 'd\ne'], ['f', 'g', 'h', 'i']] self._test_dataset(inputs, expected, record_defaults=record_defaults) def testCsvDataset_errorWithTerminateMidRecord(self): record_defaults = [['']] * 4 inputs = [['a,b,c,"a']] self._test_dataset( inputs, expected_err_re= 'Reached end of file without closing quoted field in record', record_defaults=record_defaults) def testCsvDataset_withEscapedQuotes(self): record_defaults = [['']] * 4 inputs = [['1.0,2.1,"she said: ""hello""",4.3', '5.4,6.5,goodbye,8.7']] self._test_by_comparison(inputs, record_defaults=record_defaults) ## Testing that parsing works with all buffer sizes, quoted/unquoted fields, ## and different types of line breaks def testCsvDataset_withInvalidBufferSize(self): record_defaults = [['']] * 4 inputs = [['a,b,c,d']] self._test_dataset( inputs, expected_err_re='buffer_size should be positive', record_defaults=record_defaults, buffer_size=0) def _test_dataset_on_buffer_sizes(self, inputs, expected, linebreak, record_defaults, compression_type=None, num_sizes_to_test=20): # Testing reading with a range of buffer sizes that should all work. 
for i in list(range(1, 1 + num_sizes_to_test)) + [None]: self._test_dataset( inputs, expected, linebreak=linebreak, compression_type=compression_type, record_defaults=record_defaults, buffer_size=i) def testCsvDataset_withLF(self): record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\n', record_defaults=record_defaults) def testCsvDataset_withCR(self): # Test that when the line separator is '\r', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r', record_defaults=record_defaults) def testCsvDataset_withCRLF(self): # Test that when the line separator is '\r\n', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['abc,def,ghi', '0,1,2', ',,']] expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', record_defaults=record_defaults) def testCsvDataset_withBufferSizeAndQuoted(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\n', record_defaults=record_defaults) def testCsvDataset_withCRAndQuoted(self): # Test that when the line separator is '\r', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r', record_defaults=record_defaults) def testCsvDataset_withCRLFAndQuoted(self): # Test that when the line separator is '\r\n', parsing works with all buffer # sizes record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', record_defaults=record_defaults) def testCsvDataset_withGzipCompressionType(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', compression_type='GZIP', record_defaults=record_defaults) def testCsvDataset_withZlibCompressionType(self): record_defaults = [['NA']] * 3 inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']] expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'], ['NA', 'NA', 'NA']] self._test_dataset_on_buffer_sizes( inputs, expected, linebreak='\r\n', compression_type='ZLIB', record_defaults=record_defaults) def testCsvDataset_withScalarDefaults(self): record_defaults = [constant_op.constant(0, dtype=dtypes.int64)] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) def testCsvDataset_with2DDefaults(self): record_defaults = [constant_op.constant([[0]], dtype=dtypes.int64)] * 4 inputs = [[',,,', '1,1,1,', ',2,2,2']] if context.executing_eagerly(): err_spec = errors.InvalidArgumentError, ( 'Each 
record default should be at ' 'most rank 1.') else: err_spec = ValueError, 'Shape must be at most rank 1 but is rank 2' with self.assertRaisesWithPredicateMatch(*err_spec): self._test_dataset( inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]], record_defaults=record_defaults) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py
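As a reference point, a minimal, hypothetical `tf.data.experimental.CsvDataset` call is sketched below; the path and column layout are placeholders, not files used by the tests.

import tensorflow.compat.v1 as tf

# One int column, one float column, one string column; the empty defaults mark
# the int and float columns as required, the string column defaults to "NA".
record_defaults = [
    tf.constant([], dtype=tf.int32),
    tf.constant([], dtype=tf.float32),
    tf.constant(["NA"], dtype=tf.string),
]

dataset = tf.data.experimental.CsvDataset(
    "/tmp/data/example.csv",  # placeholder path
    record_defaults,
    header=True,
    field_delim=",",
    na_value="NA")
# Each element is a tuple of three scalar tensors, one per column.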
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base class for testing the input pipeline statistics gathering ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re import numpy as np from tensorflow.core.framework import summary_pb2 from tensorflow.core.util import event_pb2 from tensorflow.python import tf2 from tensorflow.python.data.experimental.ops import stats_aggregator from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.lib.io import tf_record from tensorflow.python.platform import gfile class StatsDatasetTestBase(test_base.DatasetTestBase): """Base class for testing statistics gathered in `StatsAggregator`.""" @classmethod def setUpClass(cls): if tf2.enabled(): stats_aggregator._DEFAULT_MAX_QUEUE = 0 # pylint: disable=protected-access stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV2 # TODO(b/116314787): add graph mode support for StatsAggregatorV2. 
else: stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV1 return test_util.run_all_in_graph_and_eager_modes(cls) def datasetExperimentalStats(self, dataset, aggregator, prefix="", counter_prefix=""): options = dataset_ops.Options() options.experimental_stats.aggregator = aggregator options.experimental_stats.prefix = prefix options.experimental_stats.counter_prefix = counter_prefix options.experimental_stats.latency_all_edges = False return dataset.with_options(options) def regexForNodeName(self, op_name, stats_type=""): if stats_type: return "".join([op_name, r"/_\d+::", stats_type]) return "".join([op_name, r"/_\d+"]) def assertStatisticsContains(self, handle, tag, num_events=-1, offset=0): if tf2.enabled(): self._assertEventContains(handle, tag, num_events, offset) else: self._assertSummaryContains(handle, tag) def assertStatisticsHasCount(self, handle, tag, count, num_events=-1, greater_than=False, offset=0): if tf2.enabled(): self._assertEventHasCount(handle, tag, count, num_events, greater_than, offset) else: self._assertSummaryHasCount(handle, tag, count, greater_than) def assertStatisticsHasSum(self, handle, tag, expected_value, num_events=-1, offset=0): if tf2.enabled(): self._assertEventHasSum(handle, tag, expected_value, num_events, offset) else: self._assertSummaryHasSum(handle, tag, expected_value) def assertStatisticsHasScalarValue(self, handle, tag, expected_value, num_events=-1, offset=0): if tf2.enabled(): self._assertEventHasScalarValue(handle, tag, expected_value, num_events, offset) else: self._assertSummaryHasScalarValue(handle, tag, expected_value) def assertStatisticsHasRange(self, handle, tag, min_value, max_value, num_events=-1, offset=0): if tf2.enabled(): self._assertEventHasRange(handle, tag, min_value, max_value, num_events, offset) else: self._assertSummaryHasRange(handle, tag, min_value, max_value) def _assertSummaryContains(self, summary_str, tag): summary_proto = summary_pb2.Summary() summary_proto.ParseFromString(summary_str) for value in summary_proto.value: if re.match(tag, value.tag): return self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto)) def _assertSummaryHasCount(self, summary_str, tag, expected_value, greater_than=False): summary_proto = summary_pb2.Summary() summary_proto.ParseFromString(summary_str) for value in summary_proto.value: if re.match(tag, value.tag): if greater_than: self.assertGreaterEqual(value.histo.num, expected_value) else: self.assertEqual(expected_value, value.histo.num) return self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto)) def _assertSummaryHasRange(self, summary_str, tag, min_value, max_value): summary_proto = summary_pb2.Summary() summary_proto.ParseFromString(summary_str) for value in summary_proto.value: if re.match(tag, value.tag): self.assertLessEqual(min_value, value.histo.min) self.assertGreaterEqual(max_value, value.histo.max) return self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto)) def _assertSummaryHasSum(self, summary_str, tag, expected_value): summary_proto = summary_pb2.Summary() summary_proto.ParseFromString(summary_str) for value in summary_proto.value: if re.match(tag, value.tag): self.assertEqual(expected_value, value.histo.sum) return self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto)) def _assertSummaryHasScalarValue(self, summary_str, tag, expected_value): summary_proto = summary_pb2.Summary() summary_proto.ParseFromString(summary_str) for value in summary_proto.value: if re.match(tag, 
value.tag): self.assertEqual(expected_value, value.simple_value) return self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto)) # TODO(b/116314787): add tests to check the correctness of steps as well. def _assertEventContains(self, logdir, tag, num_events, offset): events = _events_from_logdir(logdir) if num_events == -1: self.assertGreater(len(events), 1) for event in events[::-1]: if re.match(tag, event.summary.value[0].tag): return self.fail("Expected tag %r not found in event file in %r" % (tag, logdir)) else: self.assertEqual(len(events), num_events) self.assertTrue( re.match(tag, events[num_events - offset - 1].summary.value[0].tag)) def _assertEventHasCount(self, logdir, tag, count, num_events, greater_than, offset): events = _events_from_logdir(logdir) if num_events == -1: self.assertGreater(len(events), 1) for event in events[::-1]: if re.match(tag, event.summary.value[0].tag): if greater_than: self.assertGreaterEqual(event.summary.value[0].histo.num, count) else: self.assertEqual(count, event.summary.value[0].histo.num) return self.fail("Expected tag %r not found in event file in %r" % (tag, logdir)) else: self.assertEqual(len(events), num_events) self.assertTrue( re.match(tag, events[num_events - offset - 1].summary.value[0].tag)) if greater_than: self.assertGreaterEqual( events[num_events - offset - 1].summary.value[0].histo.num, count) else: self.assertEqual( events[num_events - offset - 1].summary.value[0].histo.num, count) def _assertEventHasSum(self, logdir, tag, expected_value, num_events, offset): events = _events_from_logdir(logdir) if num_events == -1: self.assertGreater(len(events), 1) for event in events[::-1]: if re.match(tag, event.summary.value[0].tag): self.assertEqual(expected_value, event.summary.value[0].histo.sum) return self.fail("Expected tag %r not found in event file in %r" % (tag, logdir)) else: self.assertEqual(len(events), num_events) self.assertTrue( re.match(tag, events[num_events - offset - 1].summary.value[0].tag)) self.assertEqual( events[num_events - offset - 1].summary.value[0].histo.sum, expected_value) def _assertEventHasRange(self, logdir, tag, min_value, max_value, num_events, offset): events = _events_from_logdir(logdir) if num_events == -1: self.assertGreater(len(events), 1) for event in events[::-1]: if re.match(tag, event.summary.value[0].tag): self.assertLessEqual(min_value, event.summary.value[0].histo.min) self.assertGreaterEqual(max_value, event.summary.value[0].histo.max) return self.fail("Expected tag %r not found in event file in %r" % (tag, logdir)) else: self.assertEqual(len(events), num_events) self.assertTrue( re.match(tag, events[num_events - offset - 1].summary.value[0].tag)) self.assertLessEqual( min_value, events[num_events - offset - 1].summary.value[0].histo.min) self.assertGreaterEqual( max_value, events[num_events - offset - 1].summary.value[0].histo.max) def _assertEventHasScalarValue(self, logdir, tag, expected_value, num_events, offset): events = _events_from_logdir(logdir) if num_events == -1: self.assertGreater(len(events), 1) for event in events[::-1]: if re.match(tag, event.summary.value[0].tag): self.assertEqual(expected_value, event.summary.value[0].simple_value) return self.fail("Expected tag %r not found in event file in %r" % (tag, logdir)) else: self.assertEqual(len(events), num_events) self.assertTrue( re.match(tag, events[num_events - offset - 1].summary.value[0].tag)) self.assertLessEqual( expected_value, events[num_events - offset - 1].summary.value[0].simple_value) def 
getHandle(self, aggregator): # pylint: disable=protected-access if isinstance(aggregator, stats_aggregator.StatsAggregatorV1): return self.evaluate(aggregator.get_summary()) assert isinstance(aggregator, (stats_aggregator.StatsAggregatorV2)) return aggregator._logdir def parallelCallsStats(self, dataset_fn, dataset_names, num_output, function_processing_time=False, check_elements=True): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_fn() dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(num_output): next_ = self.evaluate(next_element()) if check_elements: self.assertAllEqual(np.array([i] * i, dtype=np.int64), next_) handle = self.getHandle(aggregator) for dataset_name in dataset_names: if function_processing_time: self.assertStatisticsHasCount( handle, r"(.*)::execution_time$", float(i + 1), greater_than=True) self.assertStatisticsContains( handle, self.regexForNodeName(dataset_name, "thread_utilization")) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) if function_processing_time: handle = self.getHandle(aggregator) for dataset_name in dataset_names: self.assertStatisticsHasCount( handle, r"(.*)::execution_time$", float(num_output), greater_than=True) # Adding these two methods from summary_test_util, as summary_test_util is in # contrib. def _events_from_file(filepath): """Returns all events in a single event file. Args: filepath: Path to the event file. Returns: A list of all tf.Event protos in the event file. """ records = list(tf_record.tf_record_iterator(filepath)) result = [] for r in records: event = event_pb2.Event() event.ParseFromString(r) result.append(event) return result def _events_from_logdir(logdir): """Returns all events in the single eventfile in logdir. Args: logdir: The directory in which the single event file is sought. Returns: A list of all tf.Event protos from the single event file. Raises: AssertionError: If logdir does not contain exactly one file. """ assert gfile.Exists(logdir) files = gfile.ListDirectory(logdir) assert len(files) == 1, "Found not exactly one file in logdir: %s" % files return _events_from_file(os.path.join(logdir, files[0]))
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/stats_dataset_test_base.py
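The base class above wires a `StatsAggregator` into pipelines through dataset options; a condensed, hypothetical version of that wiring under the TF 1.15 API is sketched here.

import tensorflow.compat.v1 as tf

aggregator = tf.data.experimental.StatsAggregator()

dataset = tf.data.Dataset.range(100).map(lambda x: x * 2)

# Attach the aggregator through Options, as datasetExperimentalStats() does.
options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.latency_all_edges = True
dataset = dataset.with_options(options)

# With the V1 aggregator, get_summary() returns a scalar string tensor holding
# a serialized Summary proto, which can be evaluated in a session and written
# to TensorBoard.
summary_str = aggregator.get_summary()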
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.parse_example_dataset().""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import numpy as np from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.data.experimental.ops import parsing_ops as contrib_parsing_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import test # Helpers for creating Example objects example = example_pb2.Example feature = feature_pb2.Feature features = lambda d: feature_pb2.Features(feature=d) bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v)) int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v)) float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v)) # Helpers for creating SequenceExample objects feature_list = lambda l: feature_pb2.FeatureList(feature=l) feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d) sequence_example = example_pb2.SequenceExample @test_util.run_all_in_graph_and_eager_modes class ParseExampleDatasetTest(test_base.DatasetTestBase): def _compare_output_to_expected(self, dict_tensors, expected_tensors): self.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys())) for k, v in sorted(dict_tensors.items()): expected_v = expected_tensors[k] self.assertValuesEqual(expected_v, v) def _test(self, input_tensor, feature_val, expected_values=None, expected_err=None, create_iterator_twice=False): if expected_err: with self.assertRaisesWithPredicateMatch(expected_err[0], expected_err[1]): dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply( contrib_parsing_ops.parse_example_dataset(feature_val)) get_next = self.getNext(dataset) self.evaluate(get_next()) return else: # Returns dict w/ Tensors and SparseTensors. # Check values. 
dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply( contrib_parsing_ops.parse_example_dataset(feature_val)) get_next = self.getNext(dataset) result = self.evaluate(get_next()) self._compare_output_to_expected(result, expected_values) with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(get_next()) if create_iterator_twice: get_next = self.getNext(dataset) result = self.evaluate(get_next()) self._compare_output_to_expected(result, expected_values) with self.assertRaises(errors_impl.OutOfRangeError): self.evaluate(get_next()) # Check shapes; if serialized is a Tensor we need its size to # properly check. batch_size = ( self.evaluate(input_tensor).size if isinstance(input_tensor, ops.Tensor) else np.asarray(input_tensor).size) for k, f in feature_val.items(): if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None: self.assertEqual( dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[0], batch_size) elif isinstance(f, parsing_ops.VarLenFeature): self.assertEqual( dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[1], None) def testEmptySerializedWithAllDefaults(self): sparse_name = "st_a" a_name = "a" b_name = "b" c_name = "c:has_a_tricky_name" a_default = [0, 42, 0] b_default = np.random.rand(3, 3).astype(bytes) c_default = np.random.rand(2).astype(np.float32) expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape np.empty((0, 2), dtype=np.int64), # indices np.empty((0,), dtype=np.int64), # sp_a is DT_INT64 np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0 expected_output = { sparse_name: expected_st_a, a_name: np.array(2 * [[a_default]]), b_name: np.array(2 * [b_default]), c_name: np.array(2 * [c_default]), } self._test( ops.convert_to_tensor(["", ""]), { sparse_name: parsing_ops.VarLenFeature(dtypes.int64), a_name: parsing_ops.FixedLenFeature( (1, 3), dtypes.int64, default_value=a_default), b_name: parsing_ops.FixedLenFeature( (3, 3), dtypes.string, default_value=b_default), c_name: parsing_ops.FixedLenFeature( (2,), dtypes.float32, default_value=c_default), }, expected_values=expected_output, create_iterator_twice=True) @test_util.run_deprecated_v1 def testEmptySerializedWithoutDefaultsShouldFail(self): input_features = { "st_a": parsing_ops.VarLenFeature(dtypes.int64), "a": parsing_ops.FixedLenFeature( (1, 3), dtypes.int64, default_value=[0, 42, 0]), "b": parsing_ops.FixedLenFeature( (3, 3), dtypes.string, default_value=np.random.rand(3, 3).astype(bytes)), # Feature "c" is missing a default, this gap will cause failure. "c": parsing_ops.FixedLenFeature( (2,), dtype=dtypes.float32), } # Edge case where the key is there but the feature value is empty original = example(features=features({"c": feature()})) self._test( [original.SerializeToString()], input_features, expected_err=(errors_impl.InvalidArgumentError, "Feature: c \\(data type: float\\) is required")) # Standard case of missing key and value. 
self._test( ["", ""], input_features, expected_err=(errors_impl.InvalidArgumentError, "Feature: c \\(data type: float\\) is required")) @test_util.run_deprecated_v1 def testDenseNotMatchingShapeShouldFail(self): original = [ example(features=features({ "a": float_feature([1, 1, 3]), })), example(features=features({ "a": float_feature([-1, -1]), })) ] serialized = [m.SerializeToString() for m in original] self._test( ops.convert_to_tensor(serialized), {"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)}, expected_err=(errors_impl.InvalidArgumentError, "Key: a, Index: 1. Number of float values")) def testDenseDefaultNoShapeShouldFail(self): original = [example(features=features({"a": float_feature([1, 1, 3]),})),] serialized = [m.SerializeToString() for m in original] self._test( ops.convert_to_tensor(serialized), {"a": parsing_ops.FixedLenFeature(None, dtypes.float32)}, expected_err=(ValueError, "Missing shape for feature a")) def testSerializedContainingSparse(self): original = [ example(features=features({ "st_c": float_feature([3, 4]) })), example(features=features({ "st_c": float_feature([]), # empty float list })), example(features=features({ "st_d": feature(), # feature with nothing in it })), example(features=features({ "st_c": float_feature([1, 2, -1]), "st_d": bytes_feature([b"hi"]) })) ] serialized = [m.SerializeToString() for m in original] expected_st_c = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64), np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32), np.array([4, 3], dtype=np.int64)) # batch == 2, max_elems = 3 expected_st_d = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes), np.array([4, 1], dtype=np.int64)) # batch == 2, max_elems = 1 expected_output = { "st_c": expected_st_c, "st_d": expected_st_d, } self._test( ops.convert_to_tensor(serialized), { "st_c": parsing_ops.VarLenFeature(dtypes.float32), "st_d": parsing_ops.VarLenFeature(dtypes.string) }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingSparseFeature(self): original = [ example(features=features({ "val": float_feature([3, 4]), "idx": int64_feature([5, 10]) })), example(features=features({ "val": float_feature([]), # empty float list "idx": int64_feature([]) })), example(features=features({ "val": feature(), # feature with nothing in it # missing idx feature })), example(features=features({ "val": float_feature([1, 2, -1]), "idx": int64_feature([0, 9, 3]) # unsorted })) ] serialized = [m.SerializeToString() for m in original] expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64), np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32), np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13 expected_output = {"sp": expected_sp,} self._test( ops.convert_to_tensor(serialized), {"sp": parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])}, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingSparseFeatureReuse(self): original = [ example(features=features({ "val1": float_feature([3, 4]), "val2": float_feature([5, 6]), "idx": int64_feature([5, 10]) })), example(features=features({ "val1": float_feature([]), # empty float list "idx": int64_feature([]) })), ] serialized = [m.SerializeToString() for m in original] expected_sp1 = sparse_tensor.SparseTensorValue( # indices, values, 
shape np.array([[0, 5], [0, 10]], dtype=np.int64), np.array([3.0, 4.0], dtype=np.float32), np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13 expected_sp2 = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 5], [0, 10]], dtype=np.int64), np.array([5.0, 6.0], dtype=np.float32), np.array([2, 7], dtype=np.int64)) # batch == 2, max_elems = 13 expected_output = { "sp1": expected_sp1, "sp2": expected_sp2, } self._test( ops.convert_to_tensor(serialized), { "sp1": parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13), "sp2": parsing_ops.SparseFeature( "idx", "val2", dtypes.float32, size=7, already_sorted=True) }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContaining3DSparseFeature(self): original = [ example(features=features({ "val": float_feature([3, 4]), "idx0": int64_feature([5, 10]), "idx1": int64_feature([0, 2]), })), example(features=features({ "val": float_feature([]), # empty float list "idx0": int64_feature([]), "idx1": int64_feature([]), })), example(features=features({ "val": feature(), # feature with nothing in it # missing idx feature })), example(features=features({ "val": float_feature([1, 2, -1]), "idx0": int64_feature([0, 9, 3]), # unsorted "idx1": int64_feature([1, 0, 2]), })) ] serialized = [m.SerializeToString() for m in original] expected_sp = sparse_tensor.SparseTensorValue( # indices np.array([[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]], dtype=np.int64), # values np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32), # shape batch == 4, max_elems = 13 np.array([4, 13, 3], dtype=np.int64)) expected_output = {"sp": expected_sp,} self._test( ops.convert_to_tensor(serialized), { "sp": parsing_ops.SparseFeature(["idx0", "idx1"], "val", dtypes.float32, [13, 3]) }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingDense(self): aname = "a" bname = "b*has+a:tricky_name" original = [ example(features=features({ aname: float_feature([1, 1]), bname: bytes_feature([b"b0_str"]), })), example(features=features({ aname: float_feature([-1, -1]), bname: bytes_feature([b""]), })) ] serialized = [m.SerializeToString() for m in original] expected_output = { aname: np.array( [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1), bname: np.array( ["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1), } # No defaults, values required self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32), bname: parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string), }, expected_values=expected_output, create_iterator_twice=True) # This test is identical as the previous one except # for the creation of 'serialized'. def testSerializedContainingDenseWithConcat(self): aname = "a" bname = "b*has+a:tricky_name" # TODO(lew): Feature appearing twice should be an error in future. 
original = [ (example(features=features({ aname: float_feature([10, 10]), })), example(features=features({ aname: float_feature([1, 1]), bname: bytes_feature([b"b0_str"]), }))), ( example(features=features({ bname: bytes_feature([b"b100"]), })), example(features=features({ aname: float_feature([-1, -1]), bname: bytes_feature([b"b1"]), })),), ] serialized = [ m.SerializeToString() + n.SerializeToString() for (m, n) in original ] expected_output = { aname: np.array( [[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1), bname: np.array( ["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1), } # No defaults, values required self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32), bname: parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string), }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingDenseScalar(self): original = [ example(features=features({ "a": float_feature([1]), })), example(features=features({})) ] serialized = [m.SerializeToString() for m in original] expected_output = { "a": np.array( [[1], [-1]], dtype=np.float32) # 2x1 (column vector) } self._test( ops.convert_to_tensor(serialized), { "a": parsing_ops.FixedLenFeature( (1,), dtype=dtypes.float32, default_value=-1), }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingDenseWithDefaults(self): original = [ example(features=features({ "a": float_feature([1, 1]), })), example(features=features({ "b": bytes_feature([b"b1"]), })), example(features=features({ "b": feature() })), ] serialized = [m.SerializeToString() for m in original] expected_output = { "a": np.array( [[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2, 1), "b": np.array( ["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1, 1), } self._test( ops.convert_to_tensor(serialized), { "a": parsing_ops.FixedLenFeature( (1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]), "b": parsing_ops.FixedLenFeature( (1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"), }, expected_values=expected_output, create_iterator_twice=True) def testSerializedSparseAndSparseFeatureAndDenseWithNoDefault(self): expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape np.empty((0, 2), dtype=np.int64), # indices np.empty((0,), dtype=np.int64), # sp_a is DT_INT64 np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0 expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64), np.array(["a", "b", "c"], dtype="|S"), np.array([2, 13], dtype=np.int64)) # batch == 4, max_elems = 13 original = [ example(features=features({ "c": float_feature([3, 4]), "val": bytes_feature([b"a", b"b"]), "idx": int64_feature([0, 3]) })), example(features=features({ "c": float_feature([1, 2]), "val": bytes_feature([b"c"]), "idx": int64_feature([7]) })) ] serialized = [m.SerializeToString() for m in original] a_default = [1, 2, 3] b_default = np.random.rand(3, 3).astype(bytes) expected_output = { "st_a": expected_st_a, "sp": expected_sp, "a": np.array(2 * [[a_default]]), "b": np.array(2 * [b_default]), "c": np.array( [[3, 4], [1, 2]], dtype=np.float32), } self._test( ops.convert_to_tensor(serialized), { "st_a": parsing_ops.VarLenFeature(dtypes.int64), "sp": parsing_ops.SparseFeature("idx", "val", dtypes.string, 13), "a": parsing_ops.FixedLenFeature( (1, 3), dtypes.int64, default_value=a_default), "b": parsing_ops.FixedLenFeature( (3, 3), dtypes.string, 
default_value=b_default), # Feature "c" must be provided, since it has no default_value. "c": parsing_ops.FixedLenFeature((2,), dtypes.float32), }, expected_values=expected_output, create_iterator_twice=True) def testerializedContainingSparseAndSparseFeatureWithReuse(self): expected_idx = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64), np.array([0, 3, 7, 1]), np.array([2, 2], dtype=np.int64)) # batch == 4, max_elems = 2 expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64), np.array(["a", "b", "d", "c"], dtype="|S"), np.array([2, 13], dtype=np.int64)) # batch == 4, max_elems = 13 original = [ example(features=features({ "val": bytes_feature([b"a", b"b"]), "idx": int64_feature([0, 3]) })), example(features=features({ "val": bytes_feature([b"c", b"d"]), "idx": int64_feature([7, 1]) })) ] serialized = [m.SerializeToString() for m in original] expected_output = { "idx": expected_idx, "sp": expected_sp, } self._test( ops.convert_to_tensor(serialized), { "idx": parsing_ops.VarLenFeature(dtypes.int64), "sp": parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]), }, expected_values=expected_output, create_iterator_twice=True) def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size): # During parsing, data read from the serialized proto is stored in buffers. # For small batch sizes, a buffer will contain one minibatch entry. # For larger batch sizes, a buffer may contain several minibatch # entries. This test identified a bug where the code that copied # data out of the buffers and into the output tensors assumed each # buffer only contained one minibatch entry. The bug has since been fixed. truth_int = [i for i in range(batch_size)] truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()] for i in range(batch_size)] expected_str = copy.deepcopy(truth_str) # Delete some intermediate entries for i in range(batch_size): col = 1 if np.random.rand() < 0.25: # w.p. 25%, drop out the second entry expected_str[i][col] = b"default" col -= 1 truth_str[i].pop() if np.random.rand() < 0.25: # w.p. 25%, drop out the second entry (possibly again) expected_str[i][col] = b"default" truth_str[i].pop() expected_output = { # Batch size batch_size, 1 time step. "a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1), # Batch size batch_size, 2 time steps. 
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2), } original = [ example(features=features( {"a": int64_feature([truth_int[i]]), "b": bytes_feature(truth_str[i])})) for i in range(batch_size) ] serialized = [m.SerializeToString() for m in original] self._test( ops.convert_to_tensor(serialized, dtype=dtypes.string), { "a": parsing_ops.FixedLenSequenceFeature( shape=(), dtype=dtypes.int64, allow_missing=True, default_value=-1), "b": parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.string, allow_missing=True, default_value="default"), }, expected_values=expected_output, create_iterator_twice=True) def testSerializedContainingVarLenDenseLargerBatch(self): np.random.seed(3456) for batch_size in (1, 10, 20, 100, 256): self._testSerializedContainingVarLenDenseLargerBatch(batch_size) def testSerializedShapeMismatch(self): aname = "a" bname = "b" cname = "c" original = [ example(features=features({ cname: int64_feature([2]), })), example(features=features({ aname: float_feature([1, 1]), bname: bytes_feature([b"b0_str", b"b1_str"]), })), example(features=features({ aname: float_feature([-1, -1, 2, 2]), bname: bytes_feature([b"b1"]), })), example(features=features({ aname: float_feature([]), cname: int64_feature([3]), })), ] serialized = [m.SerializeToString() for m in original] if context.executing_eagerly(): self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature((2, 1), dtype=dtypes.float32, allow_missing=True, default_value=[]), bname: parsing_ops.FixedLenSequenceFeature( (2, 1, 1), dtype=dtypes.string, allow_missing=True), }, expected_err=(errors_impl.InvalidArgumentError, "Input to reshape is a tensor with 0 values")) else: self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature((2, 1), dtype=dtypes.float32, allow_missing=True, default_value=[]), bname: parsing_ops.FixedLenSequenceFeature( (2, 1, 1), dtype=dtypes.string, allow_missing=True), }, expected_err=(ValueError, "Cannot reshape a tensor with 0 elements to shape")) @test_util.run_deprecated_v1 def testSerializedContainingVarLenDense(self): aname = "a" bname = "b" cname = "c" dname = "d" original = [ example(features=features({ cname: int64_feature([2]), })), example( features=features({ aname: float_feature([1, 1]), bname: bytes_feature([b"b0_str", b"b1_str"]), })), example( features=features({ aname: float_feature([-1, -1, 2, 2]), bname: bytes_feature([b"b1"]), })), example( features=features({ aname: float_feature([]), cname: int64_feature([3]), })), ] serialized = [m.SerializeToString() for m in original] expected_output = { aname: np.array( [ [0, 0, 0, 0], [1, 1, 0, 0], [-1, -1, 2, 2], [0, 0, 0, 0], ], dtype=np.float32).reshape(4, 2, 2, 1), bname: np.array( [["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]], dtype=bytes).reshape(4, 2, 1, 1, 1), cname: np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1), dname: np.empty(shape=(4, 0), dtype=bytes), } self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature( (2, 1), dtype=dtypes.float32, allow_missing=True), bname: parsing_ops.FixedLenSequenceFeature( (1, 1, 1), dtype=dtypes.string, allow_missing=True), cname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.int64, allow_missing=True), dname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.string, allow_missing=True), }, expected_values=expected_output, create_iterator_twice=True) # Test with padding values. 
expected_output_custom_padding = dict(expected_output) expected_output_custom_padding[aname] = np.array( [ [-2, -2, -2, -2], [1, 1, -2, -2], [-1, -1, 2, 2], [-2, -2, -2, -2], ], dtype=np.float32).reshape(4, 2, 2, 1) self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature( (2, 1), dtype=dtypes.float32, allow_missing=True, default_value=-2.0), bname: parsing_ops.FixedLenSequenceFeature( (1, 1, 1), dtype=dtypes.string, allow_missing=True), cname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.int64, allow_missing=True), dname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.string, allow_missing=True), }, expected_output_custom_padding) # Change number of required values so the inputs are not a # multiple of this size. self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature( (2, 1), dtype=dtypes.float32, allow_missing=True), bname: parsing_ops.FixedLenSequenceFeature( (2, 1, 1), dtype=dtypes.string, allow_missing=True), }, expected_err=( errors_impl.OpError, "Key: b, Index: 2. " "Number of bytes values is not a multiple of stride length.")) self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenFeature((None, 2, 1), dtype=dtypes.float32), bname: parsing_ops.FixedLenSequenceFeature( (2, 1, 1), dtype=dtypes.string, allow_missing=True), }, expected_err=(ValueError, "First dimension of shape for feature a unknown. " "Consider using FixedLenSequenceFeature.")) self._test( ops.convert_to_tensor(serialized), { cname: parsing_ops.FixedLenFeature( (1, None), dtype=dtypes.int64, default_value=[[1]]), }, expected_err=(ValueError, "All dimensions of shape for feature c need to be known " r"but received \(1, None\).")) self._test( ops.convert_to_tensor(serialized), { aname: parsing_ops.FixedLenSequenceFeature( (2, 1), dtype=dtypes.float32, allow_missing=True), bname: parsing_ops.FixedLenSequenceFeature( (1, 1, 1), dtype=dtypes.string, allow_missing=True), cname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.int64, allow_missing=False), dname: parsing_ops.FixedLenSequenceFeature( shape=[], dtype=dtypes.string, allow_missing=True), }, expected_err=(ValueError, "Unsupported: FixedLenSequenceFeature requires " "allow_missing to be True.")) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py
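For reference, a minimal end-to-end use of the op exercised by the tests above. The feature names ("age", "tags") and values are purely illustrative placeholders.

# Minimal sketch: parse vectors of serialized tf.Example protos inside a
# tf.data pipeline with tf.data.experimental.parse_example_dataset.
import tensorflow.compat.v1 as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "age": tf.train.Feature(int64_list=tf.train.Int64List(value=[29])),
    "tags": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"a", b"b"])),
}))
serialized = [example.SerializeToString()]  # one "batch" of one proto

feature_spec = {
    "age": tf.io.FixedLenFeature((1,), tf.int64, default_value=-1),
    "tags": tf.io.VarLenFeature(tf.string),
}
dataset = tf.data.Dataset.from_tensors(serialized).apply(
    tf.data.experimental.parse_example_dataset(feature_spec))
# Each element is a dict: "age" -> dense int64 Tensor, "tags" -> SparseTensor.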
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.SqlDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class SqlDatasetTest(sql_dataset_test_base.SqlDatasetTestBase): # Test that SqlDataset can read from a database table. def testReadResultSet(self): for _ in range(2): # Run twice to verify statelessness of db operations. dataset = self._createSqlDataset( query="SELECT first_name, last_name, motto FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string), num_repeats=2) self.assertDatasetProduces( dataset, expected_output=[(b"John", b"Doe", b"Hi!"), (b"Jane", b"Moe", b"Hi again!")] * 2, num_test_iterations=2) # Test that SqlDataset works on a join query. def testReadResultSetJoinQuery(self): get_next = self.getNext( self._createSqlDataset( query="SELECT students.first_name, state, motto FROM students " "INNER JOIN people " "ON students.first_name = people.first_name " "AND students.last_name = people.last_name", output_types=(dtypes.string, dtypes.string, dtypes.string))) self.assertEqual((b"John", b"California", b"Hi!"), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that SqlDataset can read a database entry with a null-terminator # in the middle of the text and place the entry in a `string` tensor. def testReadResultSetNullTerminator(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, favorite_nonsense_word " "FROM students ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next())) self.assertEqual((b"Jane", b"Moe", b"nonsense\0"), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that SqlDataset works when used on two different queries. # Because the output types of the dataset must be determined at graph-creation # time, the two queries must have the same number and types of columns. 
def testReadResultSetReuseSqlDataset(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, motto FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next())) self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, state FROM people " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) self.assertEqual((b"John", b"Doe", b"California"), self.evaluate(get_next())) self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that an `OutOfRangeError` is raised on the first call to # `get_next_str_only` if result set is empty. def testReadEmptyResultSet(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, motto FROM students " "WHERE first_name = 'Nonexistent'", output_types=(dtypes.string, dtypes.string, dtypes.string))) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that an error is raised when `driver_name` is invalid. def testReadResultSetWithInvalidDriverName(self): with self.assertRaises(errors.InvalidArgumentError): dataset = self._createSqlDataset( driver_name="sqlfake", query="SELECT first_name, last_name, motto FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string)) self.assertDatasetProduces(dataset, expected_output=[]) # Test that an error is raised when a column name in `query` is nonexistent def testReadResultSetWithInvalidColumnName(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, fake_column FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) with self.assertRaises(errors.UnknownError): self.evaluate(get_next()) # Test that an error is raised when there is a syntax error in `query`. def testReadResultSetOfQueryWithSyntaxError(self): get_next = self.getNext( self._createSqlDataset( query="SELEmispellECT first_name, last_name, motto FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) with self.assertRaises(errors.UnknownError): self.evaluate(get_next()) # Test that an error is raised when the number of columns in `query` # does not match the length of `, output_types`. def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.string, dtypes.string))) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(get_next()) # Test that no results are returned when `query` is an insert query rather # than a select query. In particular, the error refers to the number of # output types passed to the op not matching the number of columns in the # result set of the query (namely, 0 for an insert statement.) 
def testReadResultSetOfInsertQuery(self): get_next = self.getNext( self._createSqlDataset( query="INSERT INTO students (first_name, last_name, motto) " "VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')", output_types=(dtypes.string, dtypes.string, dtypes.string))) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table and # place it in an `int8` tensor. def testReadResultSetInt8(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int8))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a negative or 0-valued integer from a # SQLite database table and place it in an `int8` tensor. def testReadResultSetInt8NegativeAndZero(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, income, favorite_negative_number " "FROM students " "WHERE first_name = 'John' ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int8, dtypes.int8))) self.assertEqual((b"John", 0, -2), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a large (positive or negative) integer from # a SQLite database table and place it in an `int8` tensor. def testReadResultSetInt8MaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT desk_number, favorite_negative_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.int8, dtypes.int8))) self.assertEqual((9, -2), self.evaluate(get_next())) # Max and min values of int8 self.assertEqual((127, -128), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table and # place it in an `int16` tensor. def testReadResultSetInt16(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int16))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a negative or 0-valued integer from a # SQLite database table and place it in an `int16` tensor. def testReadResultSetInt16NegativeAndZero(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, income, favorite_negative_number " "FROM students " "WHERE first_name = 'John' ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int16, dtypes.int16))) self.assertEqual((b"John", 0, -2), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a large (positive or negative) integer from # a SQLite database table and place it in an `int16` tensor. 
def testReadResultSetInt16MaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, favorite_medium_sized_number " "FROM students ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int16))) # Max value of int16 self.assertEqual((b"John", 32767), self.evaluate(get_next())) # Min value of int16 self.assertEqual((b"Jane", -32768), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table and # place it in an `int32` tensor. def testReadResultSetInt32(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int32))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) # Test that `SqlDataset` can read a negative or 0-valued integer from a # SQLite database table and place it in an `int32` tensor. def testReadResultSetInt32NegativeAndZero(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, income FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int32))) self.assertEqual((b"John", 0), self.evaluate(get_next())) self.assertEqual((b"Jane", -20000), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a large (positive or negative) integer from # a SQLite database table and place it in an `int32` tensor. def testReadResultSetInt32MaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, favorite_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int32))) # Max value of int32 self.assertEqual((b"John", 2147483647), self.evaluate(get_next())) # Min value of int32 self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a numeric `varchar` from a SQLite database # table and place it in an `int32` tensor. def testReadResultSetInt32VarCharColumnAsInt(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, school_id FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int32))) self.assertEqual((b"John", 123), self.evaluate(get_next())) self.assertEqual((b"Jane", 1000), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table # and place it in an `int64` tensor. def testReadResultSetInt64(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int64))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a negative or 0-valued integer from a # SQLite database table and place it in an `int64` tensor. 
def testReadResultSetInt64NegativeAndZero(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, income FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int64))) self.assertEqual((b"John", 0), self.evaluate(get_next())) self.assertEqual((b"Jane", -20000), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a large (positive or negative) integer from # a SQLite database table and place it in an `int64` tensor. def testReadResultSetInt64MaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, favorite_big_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.int64))) # Max value of int64 self.assertEqual((b"John", 9223372036854775807), self.evaluate(get_next())) # Min value of int64 self.assertEqual((b"Jane", -9223372036854775808), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table and # place it in a `uint8` tensor. def testReadResultSetUInt8(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.uint8))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read the minimum and maximum uint8 values from a # SQLite database table and place them in `uint8` tensors. def testReadResultSetUInt8MinAndMaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, brownie_points FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.uint8))) # Min value of uint8 self.assertEqual((b"John", 0), self.evaluate(get_next())) # Max value of uint8 self.assertEqual((b"Jane", 255), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer from a SQLite database table # and place it in a `uint16` tensor. def testReadResultSetUInt16(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, desk_number FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.uint16))) self.assertEqual((b"John", 9), self.evaluate(get_next())) self.assertEqual((b"Jane", 127), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read the minimum and maximum uint16 values from a # SQLite database table and place them in `uint16` tensors. def testReadResultSetUInt16MinAndMaxValues(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, account_balance FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.uint16))) # Min value of uint16 self.assertEqual((b"John", 0), self.evaluate(get_next())) # Max value of uint16 self.assertEqual((b"Jane", 65535), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a 0-valued and 1-valued integer from a # SQLite database table and place them as `True` and `False` respectively # in `bool` tensors. 
def testReadResultSetBool(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, registration_complete FROM students " "ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.bool))) self.assertEqual((b"John", True), self.evaluate(get_next())) self.assertEqual((b"Jane", False), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued # from a SQLite database table and place it as `True` in a `bool` tensor. def testReadResultSetBoolNotZeroOrOne(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, favorite_medium_sized_number " "FROM students ORDER BY first_name DESC", output_types=(dtypes.string, dtypes.bool))) self.assertEqual((b"John", True), self.evaluate(get_next())) self.assertEqual((b"Jane", True), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a float from a SQLite database table # and place it in a `float64` tensor. def testReadResultSetFloat64(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, victories FROM townspeople " "ORDER BY first_name", output_types=(dtypes.string, dtypes.string, dtypes.float64))) self.assertEqual((b"George", b"Washington", 20.0), self.evaluate(get_next())) self.assertEqual((b"John", b"Adams", -19.95), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a float from a SQLite database table beyond # the precision of 64-bit IEEE, without throwing an error. Test that # `SqlDataset` identifies such a value as equal to itself. def testReadResultSetFloat64OverlyPrecise(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, accolades FROM townspeople " "ORDER BY first_name", output_types=(dtypes.string, dtypes.string, dtypes.float64))) self.assertEqual( (b"George", b"Washington", 1331241.321342132321324589798264627463827647382647382643874), self.evaluate(get_next())) self.assertEqual( (b"John", b"Adams", 1331241321342132321324589798264627463827647382647382643874.0), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `SqlDataset` can read a float from a SQLite database table, # representing the largest integer representable as a 64-bit IEEE float # such that the previous integer is also representable as a 64-bit IEEE float. # Test that `SqlDataset` can distinguish these two numbers. def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self): get_next = self.getNext( self._createSqlDataset( query="SELECT first_name, last_name, triumphs FROM townspeople " "ORDER BY first_name", output_types=(dtypes.string, dtypes.string, dtypes.float64))) self.assertNotEqual((b"George", b"Washington", 9007199254740992.0), self.evaluate(get_next())) self.assertNotEqual((b"John", b"Adams", 9007199254740991.0), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test.py
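A minimal sketch of the dataset under test: streaming rows out of a SQLite database. The database path, table, and column names below are placeholders, not the fixtures used by the test base class.

# Minimal sketch: read rows from a SQLite database as a tf.data pipeline.
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()

dataset = tf.data.experimental.SqlDataset(
    driver_name="sqlite",
    data_source_name="/tmp/students.db",
    query="SELECT first_name, desk_number FROM students",
    output_types=(tf.string, tf.int32))

for first_name, desk_number in dataset:
  print(first_name.numpy(), desk_number.numpy())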
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `experimental_slack` option.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import multi_device_iterator_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class PrefetchWithSlackTest(test_base.DatasetTestBase, parameterized.TestCase): @test_util.run_v1_only("b/121264236") def testPrefetchWithSlackOption(self): """Determines slack_period based on num devices attached to iterator.""" dataset = dataset_ops.Dataset.range(10) dataset = dataset.prefetch(1) options = dataset_ops.Options() options.experimental_slack = True dataset = dataset.with_options(options) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) dataset = multi_device_iterator._dataset # pylint: disable=protected-access self.assertIn("slack", dataset.options()._static_optimizations()) self.assertIn("slack:slack_period:2", dataset.options()._static_optimization_configs()) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) def testPrefetchWithSlackOptionWithoutIterator(self): """Defaults to slack period of 1 without iterator.""" dataset = dataset_ops.Dataset.range(10) dataset = dataset.prefetch(1) options = dataset_ops.Options() options.experimental_slack = True dataset = dataset.with_options(options) self.assertIn("slack", dataset.options()._static_optimizations()) self.assertIn("slack:slack_period:1", dataset.options()._static_optimization_configs()) self.assertDatasetProduces(dataset, range(10)) def testWithPassthroughDataset(self): """Should still work with a passthrough dataset after prefetch().""" dataset = dataset_ops.Dataset.range(10) dataset = dataset.prefetch(1) dataset = dataset.map(lambda x: x + 1) options = dataset_ops.Options() options.experimental_slack = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, range(1, 11)) def testErrorWithoutPrefetch(self): """The rewrite fails if there is no prefetch() in the pipeline.""" dataset = dataset_ops.Dataset.range(10) options = 
dataset_ops.Options()
    options.experimental_slack = True
    dataset = dataset.with_options(options)
    with self.assertRaises(errors.InvalidArgumentError):
      get_next = self.getNext(dataset)
      self.evaluate(get_next())

  def testErrorWithInvalidDataset(self):
    """With a nested dataset op after prefetch, the rewrite should fail."""
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.prefetch(1)
    dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)
    options = dataset_ops.Options()
    options.experimental_slack = True
    dataset = dataset.with_options(options)
    with self.assertRaises(errors.InvalidArgumentError):
      get_next = self.getNext(dataset)
      self.evaluate(get_next())


if __name__ == "__main__":
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3}))
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py
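The user-facing part of what these tests exercise is small; a minimal sketch of opting a pipeline into the slack rewrite follows. As the tests above show, the rewrite requires a prefetch() stage, and the slack period is derived from the number of devices when the dataset feeds a multi-device iterator (defaulting to 1 otherwise).

# Minimal sketch: enable the experimental slack rewrite on a prefetched pipeline.
import tensorflow.compat.v1 as tf

dataset = tf.data.Dataset.range(10)
dataset = dataset.prefetch(1)          # the rewrite attaches slack to prefetch

options = tf.data.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)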
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.dense_to_sparse_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


@test_util.run_all_in_graph_and_eager_modes
class DenseToSparseBatchTest(test_base.DatasetTestBase):

  def testDenseToSparseBatchDataset(self):
    components = np.random.randint(12, size=(100,)).astype(np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices(
        components).map(lambda x: array_ops.fill([x], x)).apply(
            batching.dense_to_sparse_batch(4, [12]))
    get_next = self.getNext(dataset)

    for start in range(0, len(components), 4):
      results = self.evaluate(get_next())
      self.assertAllEqual([[i, j]
                           for i, c in enumerate(components[start:start + 4])
                           for j in range(c)], results.indices)
      self.assertAllEqual(
          [c for c in components[start:start + 4] for _ in range(c)],
          results.values)
      self.assertAllEqual([min(4, len(components) - start), 12],
                          results.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  def testDenseToSparseBatchDatasetWithUnknownShape(self):
    components = np.random.randint(5, size=(40,)).astype(np.int32)
    dataset = dataset_ops.Dataset.from_tensor_slices(
        components).map(lambda x: array_ops.fill([x, x], x)).apply(
            batching.dense_to_sparse_batch(4, [5, None]))
    get_next = self.getNext(dataset)

    for start in range(0, len(components), 4):
      results = self.evaluate(get_next())
      self.assertAllEqual([[i, j, z]
                           for i, c in enumerate(components[start:start + 4])
                           for j in range(c)
                           for z in range(c)], results.indices)
      self.assertAllEqual([
          c for c in components[start:start + 4]
          for _ in range(c)
          for _ in range(c)
      ], results.values)
      self.assertAllEqual([
          min(4, len(components) - start), 5,
          np.max(components[start:start + 4])
      ], results.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  def testDenseToSparseBatchDatasetWithInvalidShape(self):
    input_tensor = array_ops.constant([[1]])
    with self.assertRaisesRegexp(ValueError, "Dimension -2 must be >= 0"):
      dataset_ops.Dataset.from_tensors(input_tensor).apply(
          batching.dense_to_sparse_batch(4, [-2]))

  def testDenseToSparseBatchDatasetShapeErrors(self):

    def dataset_fn(input_tensor):
      return dataset_ops.Dataset.from_tensors(input_tensor).apply(
          batching.dense_to_sparse_batch(4, [12]))

    # Initialize with an input tensor of incompatible rank.
    get_next = self.getNext(dataset_fn([[1]]))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "incompatible with the row shape"):
      self.evaluate(get_next())

    # Initialize with an input tensor that is larger than `row_shape`.
    get_next = self.getNext(dataset_fn(np.int32(range(13))))
    with self.assertRaisesRegexp(errors.DataLossError,
                                 "larger than the row shape"):
      self.evaluate(get_next())


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py
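A minimal sketch of the transformation these tests cover: padding-free batching of variable-length rows into one SparseTensor per batch. The input values (rows of length 1, 2, 3) are illustrative only.

# Minimal sketch: batch variable-length rows into a single SparseTensor.
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()

dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
dataset = dataset.map(lambda x: tf.fill([x], x))  # rows of length 1, 2, 3
dataset = dataset.apply(
    tf.data.experimental.dense_to_sparse_batch(batch_size=4, row_shape=[3]))

for batch in dataset:  # one SparseTensor holding all three rows
  print(batch.indices.numpy(), batch.values.numpy(), batch.dense_shape.numpy())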
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wrapping / Unwrapping dataset variants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.platform import test


@test_util.run_all_in_graph_and_eager_modes
class WrapDatasetVariantTest(test_base.DatasetTestBase):

  def testBasic(self):
    ds = dataset_ops.Dataset.range(100)
    ds_variant = ds._variant_tensor  # pylint: disable=protected-access
    wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)
    unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(wrapped_variant)

    variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
                                             ds.element_spec)
    get_next = self.getNext(variant_ds, requires_initialization=True)
    for i in range(100):
      self.assertEqual(i, self.evaluate(get_next()))

  @test_util.run_v1_only("b/123901304")
  def testSkipEagerGPU(self):
    ds = dataset_ops.Dataset.range(100)
    ds_variant = ds._variant_tensor  # pylint: disable=protected-access
    wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)

    with ops.device("/gpu:0"):
      gpu_wrapped_variant = array_ops.identity(wrapped_variant)

    unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(
        gpu_wrapped_variant)
    variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
                                             ds.element_spec)
    iterator = dataset_ops.make_initializable_iterator(variant_ds)
    get_next = iterator.get_next()
    with self.cached_session():
      self.evaluate(iterator.initializer)
      for i in range(100):
        self.assertEqual(i, self.evaluate(get_next))


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/wrap_unwrap_test.py
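A condensed restatement of the round-trip exercised above, kept here only to make the flow easy to follow. These are internal symbols (gen_dataset_ops, _VariantDataset), not public API, and the sketch mirrors the test rather than documenting a supported interface.

# Sketch of the internal wrap/unwrap round-trip on a dataset variant tensor.
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import gen_dataset_ops

ds = dataset_ops.Dataset.range(5)
variant = ds._variant_tensor  # pylint: disable=protected-access
wrapped = gen_dataset_ops.wrap_dataset_variant(variant)
unwrapped = gen_dataset_ops.unwrap_dataset_variant(wrapped)
rebuilt = dataset_ops._VariantDataset(  # pylint: disable=protected-access
    unwrapped, ds.element_spec)
# `rebuilt` yields the same elements as `ds`.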
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for MapDefunOp.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time from tensorflow.python.client import session from tensorflow.python.data.experimental.ops import map_defun from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test @test_util.run_v1_only("b/123903858: Add eager and V2 test coverage") class MapDefunTest(test_base.DatasetTestBase): def testNoIntraOpLimit(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def simple_fn(x): return x * 2 + 3 nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun( simple_fn, [elems], [dtypes.int32], [(2,)], max_intra_op_parallelism=0)[0] expected = elems * 2 + 3 self.assertAllEqual(self.evaluate(r), self.evaluate(expected)) def testMapDefunSimple(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def simple_fn(x): return x * 2 + 3 nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0] expected = elems * 2 + 3 self.assertAllEqual(self.evaluate(r), self.evaluate(expected)) def testMapDefunMismatchedTypes(self): @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def fn(x): return math_ops.cast(x, dtypes.float64) nums = [1, 2, 3, 4, 5, 6] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0] with self.assertRaises(errors.InvalidArgumentError): self.evaluate(r) def testMapDefunReduceDim(self): # Tests where the output has a different rank from the input @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def fn(x): return array_ops.gather(x, 0) nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0] expected = constant_op.constant([1, 3, 5]) self.assertAllEqual(self.evaluate(r), self.evaluate(expected)) def testMapDefunMultipleOutputs(self): 
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def fn(x): return (x, math_ops.cast(x * 2 + 3, dtypes.float64)) nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,), (2,)]) expected = [elems, elems * 2 + 3] self.assertAllEqual(self.evaluate(r), self.evaluate(expected)) def testMapDefunShapeInference(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def fn(x): return x nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0] self.assertEqual(result.get_shape(), (3, 2)) def testMapDefunPartialShapeInference(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def fn(x): return x elems = array_ops.placeholder(dtypes.int64, (None, 2)) result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)]) self.assertEqual(result[0].get_shape().as_list(), [None, 2]) def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self): @function.defun(input_signature=[ tensor_spec.TensorSpec(None, dtypes.int32), tensor_spec.TensorSpec(None, dtypes.int32) ]) def fn(x, y): return x, y elems1 = array_ops.placeholder(dtypes.int32) elems2 = array_ops.placeholder(dtypes.int32) result = map_defun.map_defun(fn, [elems1, elems2], [dtypes.int32, dtypes.int32], [(), ()]) with self.cached_session() as sess: with self.assertRaisesWithPredicateMatch( errors.InvalidArgumentError, "All inputs must have the same dimension 0."): sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]}) def testMapDefunRaisesDefunError(self): @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def fn(x): with ops.control_dependencies([check_ops.assert_equal(x, 0)]): return array_ops.identity(x) elems = constant_op.constant([0, 0, 0, 37, 0]) result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()]) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(result) def testMapDefunCancelledCorrectly(self): @function.defun(input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)]) def defun(x): # x has leading dimension 5, this will raise an error return array_ops.gather(x, 10) c = array_ops.tile( array_ops.expand_dims( constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0), [100, 1]) map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0] with self.assertRaisesRegexp(errors.InvalidArgumentError, r"indices = 10 is not in \[0, 5\)"): self.evaluate(map_defun_op) def testMapDefunWithUnspecifiedOutputShape(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def simple_fn(x): res = x * 2 + 3 return (res, res + 1, res + 2) nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32, dtypes.int32, dtypes.int32], [None, (None,), (2,)]) expected = elems * 2 + 3 self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected)) self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1)) self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2)) def testMapDefunWithDifferentOutputShapeEachRun(self): @function.defun( input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)]) def simple_fn(x): return x * 2 + 3 elems = array_ops.placeholder(dtypes.int32, name="data") r = map_defun.map_defun(simple_fn, [elems], 
[dtypes.int32], [None])[0] with session.Session() as sess: self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3]) self.assertAllEqual( sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]]) def testMapDefunWithWrongOutputShape(self): @function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)]) def simple_fn(x): return x * 2 + 3 nums = [[1, 2], [3, 4], [5, 6]] elems = constant_op.constant(nums, dtype=dtypes.int32, name="data") r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0] with self.assertRaises(errors.InvalidArgumentError): self.evaluate(r) def testMapDefunWithInvalidInput(self): @function.defun( input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)]) def simple_fn(x): return x * 2 c = constant_op.constant(2) with self.assertRaises(ValueError): # Fails at graph construction time for inputs with known shapes. r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0] p = array_ops.placeholder(dtypes.int32) r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0] with session.Session() as sess: with self.assertRaises(errors.InvalidArgumentError): sess.run(r, feed_dict={p: 0}) def testMapDefunWithParentCancellation(self): # Checks that a cancellation of the parent graph is threaded through to # MapDefunOp correctly. @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def simple_fn(x): del x queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ()) # Blocking return queue.dequeue_many(5) c = constant_op.constant([1, 2, 3, 4, 5]) map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0] with self.cached_session() as sess: thread = self.checkedThread( self.assert_op_cancelled, args=(map_defun_op,)) thread.start() time.sleep(0.2) sess.close() thread.join() def testMapDefunWithCapturedInputs(self): c = constant_op.constant(2) @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def fn(x): return x + c x = constant_op.constant([1, 2, 3, 4]) map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0] expected = x + c self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op)) def testMapDefunWithVariantTensor(self): @function.defun( input_signature=[tensor_spec.TensorSpec([], dtypes.variant)]) def fn(x): return x st = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant) serialized = array_ops.stack([serialized, serialized]) map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant], [None])[0] deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32) expected = sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]], values=[1, 2, 1, 2], dense_shape=[2, 3, 4]) actual = self.evaluate(deserialized) self.assertValuesEqual(expected, actual) def testMapDefunWithVariantTensorAsCaptured(self): st = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant) @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)]) def fn(x): del x return serialized x = constant_op.constant([0, 0]) map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0] deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32) expected = sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]], values=[1, 2, 1, 2], dense_shape=[2, 3, 4]) actual = 
self.evaluate(deserialized) self.assertValuesEqual(expected, actual) def testMapDefunWithStrTensor(self): @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)]) def fn(x): return x st = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string) serialized = array_ops.stack([serialized, serialized]) map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string], [None])[0] deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32) expected = sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]], values=[1, 2, 1, 2], dense_shape=[2, 3, 4]) actual = self.evaluate(deserialized) self.assertValuesEqual(expected, actual) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py
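The file above exercises map_defun through DatasetTestBase helpers; for orientation, here is a minimal standalone sketch of the same map_defun.map_defun call pattern used in testMapDefunSimple, assuming the TF 1.15 graph-mode setup the test relies on (this sketch is not part of the original test file).

# Minimal sketch, separate from the test file above: apply a defun row-wise
# with map_defun, mirroring testMapDefunSimple.
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec

@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def _double_plus_three(x):
  # Applied independently to each row of the batched input.
  return x * 2 + 3

elems = constant_op.constant([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32)
# map_defun takes the defun, the list of batched inputs, and the dtypes/shapes
# of one function invocation; it returns a list of batched outputs.
result = map_defun.map_defun(
    _double_plus_three, [elems], [dtypes.int32], [(2,)])[0]

with session.Session() as sess:
  print(sess.run(result))  # [[ 5  7] [ 9 11] [13 15]]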
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.make_csv_dataset()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import zlib import numpy as np from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MakeCsvDatasetTest(test_base.DatasetTestBase): def _make_csv_dataset(self, filenames, batch_size, num_epochs=1, **kwargs): return readers.make_csv_dataset( filenames, batch_size=batch_size, num_epochs=num_epochs, **kwargs) def _setup_files(self, inputs, linebreak="\n", compression_type=None): filenames = [] for i, ip in enumerate(inputs): fn = os.path.join(self.get_temp_dir(), "temp_%d.csv" % i) contents = linebreak.join(ip).encode("utf-8") if compression_type is None: with open(fn, "wb") as f: f.write(contents) elif compression_type == "GZIP": with gzip.GzipFile(fn, "wb") as f: f.write(contents) elif compression_type == "ZLIB": contents = zlib.compress(contents) with open(fn, "wb") as f: f.write(contents) else: raise ValueError("Unsupported compression_type", compression_type) filenames.append(fn) return filenames def _next_expected_batch(self, expected_output, expected_keys, batch_size, num_epochs): features = {k: [] for k in expected_keys} for _ in range(num_epochs): for values in expected_output: for n, key in enumerate(expected_keys): features[key].append(values[n]) if len(features[expected_keys[0]]) == batch_size: yield features features = {k: [] for k in expected_keys} if features[expected_keys[0]]: # Leftover from the last batch yield features def _verify_output( self, dataset, batch_size, num_epochs, label_name, expected_output, expected_keys, ): get_next = self.getNext(dataset) for expected_features in self._next_expected_batch( expected_output, expected_keys, batch_size, num_epochs, ): actual_features = self.evaluate(get_next()) if label_name is not None: expected_labels = expected_features.pop(label_name) self.assertAllEqual(expected_labels, actual_features[1]) actual_features = actual_features[0] for k in expected_features.keys(): # Compare features self.assertAllEqual(expected_features[k], actual_features[k]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def _test_dataset(self, inputs, expected_output, expected_keys, batch_size=1, num_epochs=1, label_name=None, **kwargs): """Checks that elements produced by CsvDataset match expected output.""" # Convert str type 
because py3 tf strings are bytestrings filenames = self._setup_files( inputs, compression_type=kwargs.get("compression_type", None)) dataset = self._make_csv_dataset( filenames, batch_size=batch_size, num_epochs=num_epochs, label_name=label_name, **kwargs) self._verify_output(dataset, batch_size, num_epochs, label_name, expected_output, expected_keys) def testMakeCSVDataset(self): """Tests making a CSV dataset with keys and defaults provided.""" record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, ) def testMakeCSVDataset_withBatchSizeAndEpochs(self): """Tests making a CSV dataset with keys and defaults provided.""" record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=3, num_epochs=10, shuffle=False, header=True, column_defaults=record_defaults, ) def testMakeCSVDataset_withCompressionType(self): """Tests `compression_type` argument.""" record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" for compression_type in ("GZIP", "ZLIB"): self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, compression_type=compression_type, ) def testMakeCSVDataset_withCompressionTypeAndNoColumnNames(self): """Tests `compression_type` argument.""" record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), 
"10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, compression_type="GZIP", ) with self.assertRaisesRegexp(ValueError, "compression_type .ZLIB. is not supported"): self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, compression_type="ZLIB", ) def testMakeCSVDataset_withBadInputs(self): """Tests that exception is raised when input is malformed. """ record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] filenames = self._setup_files(inputs) # Duplicate column names with self.assertRaises(ValueError): self._make_csv_dataset( filenames, batch_size=1, column_defaults=record_defaults, label_name="col0", column_names=column_names * 2) # Label key not one of column names with self.assertRaises(ValueError): self._make_csv_dataset( filenames, batch_size=1, column_defaults=record_defaults, label_name="not_a_real_label", column_names=column_names) def testMakeCSVDataset_withNoLabel(self): """Tests making a CSV dataset with no label provided.""" record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, ) def testMakeCSVDataset_withNoHeader(self): """Tests that datasets can be created from CSV files with no header line. """ record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [["0,1,2,3,4", "5,6,7,8,9"], ["10,11,12,13,14", "15,16,17,18,19"]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=False, column_defaults=record_defaults, ) def testMakeCSVDataset_withTypes(self): """Tests that defaults can be a dtype instead of a Tensor for required vals. 
""" record_defaults = [ dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64, dtypes.string ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x[0] for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x[0] for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, ) def testMakeCSVDataset_withNoColNames(self): """Tests that datasets can be created when column names are not specified. In that case, we should infer the column names from the header lines. """ record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"], [10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, column_defaults=record_defaults, ) def testMakeCSVDataset_withTypeInferenceMismatch(self): # Test that error is thrown when num fields doesn't match columns column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] filenames = self._setup_files(inputs) with self.assertRaises(ValueError): self._make_csv_dataset( filenames, column_names=column_names + ["extra_name"], column_defaults=None, batch_size=2, num_epochs=10) def testMakeCSVDataset_withTypeInference(self): """Tests that datasets can be created when no defaults are specified. In that case, we should infer the types from the first N records. """ column_names = ["col%d" % i for i in range(5)] str_int32_max = str(2**33) inputs = [[ ",".join(x for x in column_names), "0,%s,2.0,3e50,rabbit" % str_int32_max ]] expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, ) def testMakeCSVDataset_withTypeInferenceFallthrough(self): """Tests that datasets can be created when no defaults are specified. Tests on a deliberately tricky file. 
""" column_names = ["col%d" % i for i in range(5)] str_int32_max = str(2**33) inputs = [[ ",".join(x for x in column_names), ",,,,", "0,0,0.0,0.0,0.0", "0,%s,2.0,3e50,rabbit" % str_int32_max, ",,,,", ]] expected_output = [[0, 0, 0, 0, b""], [0, 0, 0, 0, b"0.0"], [0, 2**33, 2.0, 3e50, b"rabbit"], [0, 0, 0, 0, b""]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=True, ) def testMakeCSVDataset_withNAValuesAndFieldDelim(self): """Tests that datasets can be created from different delim and na_value.""" column_names = ["col%d" % i for i in range(5)] inputs = [["0 1 2 3 4", "5 6 7 8 9"], ["10 11 12 13 14", "15 16 17 ? 19"]] expected_output = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 0, 19]] label = "col0" self._test_dataset( inputs, expected_output=expected_output, expected_keys=column_names, column_names=column_names, label_name=label, batch_size=1, num_epochs=1, shuffle=False, header=False, na_value="?", field_delim=" ", ) def testMakeCSVDataset_withSelectCols(self): record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] str_int32_max = str(2**33) inputs = [[ ",".join(x for x in column_names), "0,%s,2.0,3e50,rabbit" % str_int32_max ]] expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]] select_cols = [1, 3, 4] self._test_dataset( inputs, expected_output=[[x[i] for i in select_cols] for x in expected_output], expected_keys=[column_names[i] for i in select_cols], column_names=column_names, column_defaults=[record_defaults[i] for i in select_cols], batch_size=1, num_epochs=1, shuffle=False, header=True, select_columns=select_cols, ) # Can still do inference without provided defaults self._test_dataset( inputs, expected_output=[[x[i] for i in select_cols] for x in expected_output], expected_keys=[column_names[i] for i in select_cols], column_names=column_names, batch_size=1, num_epochs=1, shuffle=False, header=True, select_columns=select_cols, ) # Can still do column name inference self._test_dataset( inputs, expected_output=[[x[i] for i in select_cols] for x in expected_output], expected_keys=[column_names[i] for i in select_cols], batch_size=1, num_epochs=1, shuffle=False, header=True, select_columns=select_cols, ) # Can specify column names instead of indices self._test_dataset( inputs, expected_output=[[x[i] for i in select_cols] for x in expected_output], expected_keys=[column_names[i] for i in select_cols], column_names=column_names, batch_size=1, num_epochs=1, shuffle=False, header=True, select_columns=[column_names[i] for i in select_cols], ) def testMakeCSVDataset_withSelectColsError(self): record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] column_names = ["col%d" % i for i in range(5)] str_int32_max = str(2**33) inputs = [[ ",".join(x for x in column_names), "0,%s,2.0,3e50,rabbit" % str_int32_max ]] select_cols = [1, 3, 4] filenames = self._setup_files(inputs) with self.assertRaises(ValueError): # Mismatch in number of defaults and number of columns selected, # should raise an error self._make_csv_dataset( filenames, 
batch_size=1, column_defaults=record_defaults, column_names=column_names, select_columns=select_cols) with self.assertRaises(ValueError): # Invalid column name should raise an error self._make_csv_dataset( filenames, batch_size=1, column_defaults=[[0]], column_names=column_names, label_name=None, select_columns=["invalid_col_name"]) def testMakeCSVDataset_withShuffle(self): record_defaults = [ constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.int64), constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.float64), constant_op.constant([], dtypes.string) ] def str_series(st): return ",".join(str(i) for i in range(st, st + 5)) column_names = ["col%d" % i for i in range(5)] inputs = [ [",".join(x for x in column_names) ] + [str_series(5 * i) for i in range(15)], [",".join(x for x in column_names)] + [str_series(5 * i) for i in range(15, 20)], ] filenames = self._setup_files(inputs) total_records = 20 for batch_size in [1, 2]: # Test that shuffling with the same seed produces the same result dataset1 = self._make_csv_dataset( filenames, column_defaults=record_defaults, column_names=column_names, batch_size=batch_size, header=True, shuffle=True, shuffle_seed=5, num_epochs=2, ) dataset2 = self._make_csv_dataset( filenames, column_defaults=record_defaults, column_names=column_names, batch_size=batch_size, header=True, shuffle=True, shuffle_seed=5, num_epochs=2, ) next1 = self.getNext(dataset1) next2 = self.getNext(dataset2) for _ in range(total_records // batch_size): batch1 = nest.flatten(self.evaluate(next1())) batch2 = nest.flatten(self.evaluate(next2())) for i in range(len(batch1)): self.assertAllEqual(batch1[i], batch2[i]) # Test that shuffling with a different seed produces different results dataset1 = self._make_csv_dataset( filenames, column_defaults=record_defaults, column_names=column_names, batch_size=batch_size, header=True, shuffle=True, shuffle_seed=5, num_epochs=2, ) dataset2 = self._make_csv_dataset( filenames, column_defaults=record_defaults, column_names=column_names, batch_size=batch_size, header=True, shuffle=True, shuffle_seed=6, num_epochs=2, ) next1 = self.getNext(dataset1) next2 = self.getNext(dataset2) all_equal = False for _ in range(total_records // batch_size): batch1 = nest.flatten(self.evaluate(next1())) batch2 = nest.flatten(self.evaluate(next2())) for i in range(len(batch1)): all_equal = all_equal and np.array_equal(batch1[i], batch2[i]) self.assertFalse(all_equal) def testIndefiniteRepeatShapeInference(self): column_names = ["col%d" % i for i in range(5)] inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [ ",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19" ]] filenames = self._setup_files(inputs) dataset = self._make_csv_dataset(filenames, batch_size=32, num_epochs=None) for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)): self.assertEqual(32, shape[0]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py
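For orientation, a minimal sketch of the readers.make_csv_dataset call pattern these tests exercise; "example.csv" is a hypothetical path standing in for the temp files the tests write, and the column layout mirrors their five-column inputs.

# Minimal sketch, not part of the test file above; the CSV path is assumed.
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.framework import dtypes

column_names = ["col%d" % i for i in range(5)]
dataset = readers.make_csv_dataset(
    ["example.csv"],            # hypothetical file; the tests write temp files
    batch_size=2,
    column_names=column_names,
    column_defaults=[dtypes.int32, dtypes.int64, dtypes.float32,
                     dtypes.float64, dtypes.string],
    label_name="col0",          # yields (features_dict, label) pairs
    header=True,
    shuffle=False,
    num_epochs=1)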
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `SnapshotDataset` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time from absl.testing import parameterized from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.ops import snapshot from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers as core_readers from tensorflow.python.framework import combinations from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test class SnapshotDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase, parameterized.TestCase): def setUp(self): super(SnapshotDatasetTest, self).setUp() self.removeTFRecords() def removeTFRecords(self): for filename in self.test_filenames: os.remove(filename) self.test_filenames = [] def setUpTFRecord(self, num_files=10, num_records=10): self._num_files = num_files self._num_records = num_records self.test_filenames = self._createFiles() def makeSnapshotDirectory(self): tmpdir = self.get_temp_dir() tmpdir = os.path.join(tmpdir, "snapshot") os.mkdir(tmpdir) return tmpdir def assertSnapshotDirectoryContains( self, directory, num_fingerprints, num_runs_per_fp, num_snapshot_files): dirlist = os.listdir(directory) self.assertLen(dirlist, num_fingerprints) for i in range(num_fingerprints): fingerprint_dir = os.path.join(directory, dirlist[i]) fingerprint_dir_list = sorted(os.listdir(fingerprint_dir)) self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1) self.assertEqual(fingerprint_dir_list[num_runs_per_fp], "snapshot.metadata") for j in range(num_runs_per_fp): run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j]) run_dirlist = sorted(os.listdir(run_dir)) self.assertLen(run_dirlist, num_snapshot_files) file_counter = 0 for filename in run_dirlist: self.assertEqual(filename, "%08d.snapshot" % file_counter) file_counter += 1 @combinations.generate(test_base.default_test_combinations()) def testWriteDifferentPipelinesInOneDirectory(self): tmpdir = self.makeSnapshotDirectory() dataset = dataset_ops.Dataset.range(1000) dataset = dataset.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset, list(range(1000))) dataset = dataset_ops.Dataset.range(1001) dataset = dataset.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset, list(range(1001))) self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1) @combinations.generate(test_base.default_test_combinations()) def testWriteSnapshotMultipleSimultaneous(self): tmpdir = self.makeSnapshotDirectory() dataset1 = dataset_ops.Dataset.range(1000) dataset1 = dataset1.apply(snapshot.snapshot(tmpdir)) next1 = 
self.getNext(dataset1) dataset2 = dataset_ops.Dataset.range(1000) dataset2 = dataset2.apply(snapshot.snapshot(tmpdir)) next2 = self.getNext(dataset2) for _ in range(1000): self.evaluate(next1()) self.evaluate(next2()) # we check that only one copy of the metadata has been written, and the # one that lost the race would be in passthrough mode. self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) @combinations.generate(test_base.default_test_combinations()) def testGetNextCreatesDir(self): tmpdir = self.makeSnapshotDirectory() # We create two iterators but call getNext on only one. dataset1 = dataset_ops.Dataset.range(1000) dataset1 = dataset1.apply(snapshot.snapshot(tmpdir)) next1 = self.getNext(dataset1) dataset2 = dataset_ops.Dataset.range(1001) dataset2 = dataset2.apply(snapshot.snapshot(tmpdir)) _ = self.getNext(dataset2) for _ in range(1000): self.evaluate(next1()) # We check that only one directory is created. self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( compression=[snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP]))) def testWriteSnapshotSimpleSuccessful(self, compression): tmpdir = self.makeSnapshotDirectory() dataset = dataset_ops.Dataset.range(1000) dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression)) self.assertDatasetProduces(dataset, list(range(1000))) self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) @combinations.generate(test_base.default_test_combinations()) def testWriteSnapshotRepeatAfterwards(self): tmpdir = self.makeSnapshotDirectory() dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply(snapshot.snapshot(tmpdir)) dataset = dataset.repeat(10) self.assertDatasetProduces(dataset, list(range(10)) * 10) self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( compression=[snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP]))) def testReadSnapshotBackAfterWrite(self, compression): self.setUpTFRecord() filenames = self.test_filenames expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 10) ] tmpdir = self.makeSnapshotDirectory() dataset = core_readers._TFRecordDataset(filenames) dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression)) self.assertDatasetProduces(dataset, expected) # remove the original files and try to read the data back only from snapshot self.removeTFRecords() dataset2 = core_readers._TFRecordDataset(filenames) dataset2 = dataset2.apply(snapshot.snapshot( tmpdir, compression=compression)) self.assertDatasetProduces(dataset2, expected) @combinations.generate(test_base.default_test_combinations()) def testReadSnapshotParallelAfterWrite(self): self.setUpTFRecord(10, 4000) filenames = self.test_filenames expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 4000) ] tmpdir = self.makeSnapshotDirectory() dataset = core_readers._TFRecordDataset(filenames) dataset = dataset.apply( snapshot.snapshot( tmpdir, shard_size_bytes=1024 * 1024, num_reader_threads=2, reader_buffer_size=10)) self.assertDatasetProduces(dataset, expected, assert_items_equal=True) # remove the original files and try to read the data back only from # snapshot. 
self.removeTFRecords() dataset2 = core_readers._TFRecordDataset(filenames) dataset2 = dataset2.apply( snapshot.snapshot( tmpdir, shard_size_bytes=1024 * 1024, num_reader_threads=2, reader_buffer_size=10)) self.assertDatasetProduces(dataset2, expected, assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.times( combinations.combine( compression=[snapshot.COMPRESSION_NONE, snapshot.COMPRESSION_GZIP]), combinations.combine(threads=2, size=[1, 2]) + combinations.combine(threads=8, size=[1, 4, 8])))) def testReadSnapshotBackAfterMultiThreadedWrite( self, compression, threads, size): self.setUpTFRecord() filenames = self.test_filenames expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 10) ] tmpdir = self.makeSnapshotDirectory() dataset = core_readers._TFRecordDataset(filenames) dataset = dataset.apply( snapshot.snapshot( tmpdir, compression=compression, num_writer_threads=threads, writer_buffer_size=size)) self.assertDatasetProduces(dataset, expected) # remove the original files and try to read the data back only from # snapshot self.removeTFRecords() dataset2 = core_readers._TFRecordDataset(filenames) dataset2 = dataset2.apply( snapshot.snapshot(tmpdir, compression=compression)) self.assertDatasetProduces(dataset2, expected, assert_items_equal=True) @combinations.generate(test_base.default_test_combinations()) def testSameFingerprintWithDifferentInitializationOrder(self): tmpdir = self.makeSnapshotDirectory() dataset1 = dataset_ops.Dataset.range(0, 100) dataset2 = dataset_ops.Dataset.range(100, 200) dataset3 = dataset_ops.Dataset.range(200, 300) dataset = dataset1.concatenate(dataset2).concatenate(dataset3) dataset = dataset.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset, list(range(300))) dataset4 = dataset_ops.Dataset.range(200, 300) dataset5 = dataset_ops.Dataset.range(100, 200) dataset6 = dataset_ops.Dataset.range(0, 100) dataset = dataset6.concatenate(dataset5).concatenate(dataset4) dataset = dataset.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset, list(range(300))) self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) @combinations.generate(test_base.default_test_combinations()) def testExpiredSnapshotRewrite(self): tmpdir = self.makeSnapshotDirectory() dataset1 = dataset_ops.Dataset.range(1000) dataset1 = dataset1.apply( snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1)) next1 = self.getNext(dataset1) # Don't finish reading dataset1, so it is never finalized for _ in range(500): self.evaluate(next1()) self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1) time.sleep(2) # Creating dataset2 after we run through dataset1 due to eager mode, where # the snapshot state is determined immediately upon dataset creation. We # only want to determine the snapshot state for dataset2 after the first # snapshot has expired. 
dataset2 = dataset_ops.Dataset.range(1000) dataset2 = dataset2.apply( snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1)) next2 = self.getNext(dataset2) for _ in range(500): self.evaluate(next2()) self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1) @combinations.generate(test_base.default_test_combinations()) def testSpecifyShardSize(self): tmpdir = self.makeSnapshotDirectory() dataset = dataset_ops.Dataset.from_tensor_slices([1.0]) dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024])) dataset = dataset.repeat(10) dataset = dataset.apply( snapshot.snapshot(tmpdir, shard_size_bytes=10 * 1024 * 1024)) next_fn = self.getNext(dataset) for _ in range(10): self.evaluate(next_fn()) self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 4) @combinations.generate(test_base.default_test_combinations()) def testAdditionalOperationsAfterReadBack(self): self.setUpTFRecord() filenames = self.test_filenames expected = [ b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 10) ] tmpdir = self.makeSnapshotDirectory() dataset = core_readers._TFRecordDataset(filenames) dataset = dataset.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset, expected) # remove the original files and try to read the data back only from snapshot self.removeTFRecords() dataset2 = core_readers._TFRecordDataset(filenames) dataset2 = dataset2.apply(snapshot.snapshot(tmpdir)) self.assertDatasetProduces(dataset2, expected) expected_after = [ b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension for f in range(0, 10) for r in range(0, 10) ] dataset3 = core_readers._TFRecordDataset(filenames) dataset3 = dataset3.apply(snapshot.snapshot(tmpdir)) dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000)) self.assertDatasetProduces(dataset3, expected_after) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/snapshot_test.py
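As a quick reference, a minimal sketch of the snapshot.snapshot(...) transformation the tests above apply; the snapshot directory here is a temp dir created only for illustration.

# Minimal sketch, separate from the test file above.
import tempfile

from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.ops import dataset_ops

snapshot_dir = tempfile.mkdtemp()  # stand-in for makeSnapshotDirectory()
dataset = dataset_ops.Dataset.range(1000)
# The first full pass writes <snapshot_dir>/<fingerprint>/<run>/%08d.snapshot
# files; later pipelines with the same fingerprint read the data back.
dataset = dataset.apply(
    snapshot.snapshot(snapshot_dir, compression=snapshot.COMPRESSION_GZIP))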
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `override_threadpool()` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading from absl.testing import parameterized import numpy as np from tensorflow.core.framework import graph_pb2 from tensorflow.python.data.experimental.ops import threading_options from tensorflow.python.data.experimental.ops import threadpool from tensorflow.python.data.experimental.ops import unique from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import script_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class OverrideThreadpoolTest(test_base.DatasetTestBase, parameterized.TestCase): def _testNumThreadsHelper(self, num_threads, override_threadpool_fn): def get_thread_id(_): # Python creates a dummy thread object to represent the current # thread when called from an "alien" thread (such as a # `PrivateThreadPool` thread in this case). It does not include # the TensorFlow-given display name, but it has a unique # identifier that maps one-to-one with the underlying OS thread. return np.array(threading.current_thread().ident).astype(np.int64) dataset = ( dataset_ops.Dataset.range(1000).map( lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64), num_parallel_calls=32).apply(unique.unique())) dataset = override_threadpool_fn(dataset) next_element = self.getNext(dataset, requires_initialization=True) thread_ids = [] try: while True: thread_ids.append(self.evaluate(next_element())) except errors.OutOfRangeError: pass self.assertLen(thread_ids, len(set(thread_ids))) self.assertNotEmpty(thread_ids) if num_threads: # NOTE(mrry): We don't control the thread pool scheduling, and # so cannot guarantee that all of the threads in the pool will # perform work. 
self.assertLessEqual(len(thread_ids), num_threads) @parameterized.named_parameters( ("1", 1, None), ("2", 2, None), ("3", 4, None), ("4", 8, None), ("5", 16, None), ("6", 4, -1), ("7", 4, 0), ("8", 4, 1), ("9", 4, 4), ) def testNumThreadsDeprecated(self, num_threads, max_intra_op_parallelism): def override_threadpool_fn(dataset): return threadpool.override_threadpool( dataset, threadpool.PrivateThreadPool( num_threads, max_intra_op_parallelism=max_intra_op_parallelism, display_name="private_thread_pool_%d" % num_threads)) self._testNumThreadsHelper(num_threads, override_threadpool_fn) @parameterized.named_parameters( ("1", 1, None), ("2", 2, None), ("3", 4, None), ("4", 8, None), ("5", 16, None), ("6", None, 0), ("7", None, 1), ("8", None, 4), ("9", 4, 0), ("10", 4, 1), ("11", 4, 4), ("12", None, None), ) def testNumThreads(self, num_threads, max_intra_op_parallelism): def override_threadpool_fn(dataset): t_options = threading_options.ThreadingOptions() if max_intra_op_parallelism is not None: t_options.max_intra_op_parallelism = max_intra_op_parallelism if num_threads is not None: t_options.private_threadpool_size = num_threads options = dataset_ops.Options() options.experimental_threading = t_options return dataset.with_options(options) self._testNumThreadsHelper(num_threads, override_threadpool_fn) def testMaxIntraOpParallelismAsGraphDefInternal(self): dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset_ops._MaxIntraOpParallelismDataset(dataset, 1) graph = graph_pb2.GraphDef().FromString( self.evaluate(dataset._as_serialized_graph())) self.assertTrue( any([node.op != "MaxIntraOpParallelismDataset" for node in graph.node])) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/override_threadpool_test.py
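A minimal sketch of the options-based path checked in testNumThreads above: a private threadpool size and an intra-op parallelism limit attached to a dataset through its options.

# Minimal sketch, separate from the test file above.
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import dataset_ops

dataset = dataset_ops.Dataset.range(1000)
t_options = threading_options.ThreadingOptions()
t_options.max_intra_op_parallelism = 1   # limit per-op threading
t_options.private_threadpool_size = 4    # dedicated pool for this pipeline
options = dataset_ops.Options()
options.experimental_threading = t_options
dataset = dataset.with_options(options)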
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Eager mode tests for the experimental `replicate` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from absl.testing import parameterized from tensorflow.core.protobuf import cluster_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import tensorflow_server_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.compat import compat from tensorflow.python.data.experimental.ops import distribute from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import combinations from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test from tensorflow.python.training import server_lib class LocalReplicateTest(test_base.DatasetTestBase, parameterized.TestCase): def __init__(self, methodName="runTest"): # pylint: disable=invalid-name super(LocalReplicateTest, self).__init__(methodName) self._device0 = "/device:CPU:0" self._device1 = "/device:CPU:1" self._device2 = "/device:CPU:2" @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph", "eager"])) def testBasic(self): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): self.assertDatasetProduces(dataset0, range(100)) with ops.device(self._device1): self.assertDatasetProduces(dataset1, range(100)) with ops.device(self._device2): self.assertDatasetProduces(dataset2, range(100)) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph", "eager"])) def testVariableInput(self): with ops.device(self._device0): counter_var = variable_scope.get_variable( "counter", (), dtypes.int32, use_resource=True) dataset0 = dataset_ops.Dataset.range(100).map( lambda _: counter_var.assign_add(1)) # We don't support stateful ops in functions as of now. 
with self.assertRaises(errors.FailedPreconditionError): replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) self.evaluate(replicated_ds[self._device1]._variant_tensor) @combinations.generate( combinations.combine(tf_api_version=[1], mode=["graph", "eager"])) def testWhitelistStatefulOp(self): with compat.forward_compatibility_horizon(2019, 9, 12): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100).map( lambda _: random_ops.random_uniform( # pylint:disable=g-long-lambda [], minval=1, maxval=10, dtype=dtypes.float32)) opt = dataset_ops.Options() opt.experimental_stateful_whitelist = ["RandomUniform"] dataset0 = dataset0.with_options(opt) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): get_next0 = self.getNext(dataset0) with ops.device(self._device1): get_next1 = self.getNext(dataset1) with ops.device(self._device2): get_next2 = self.getNext(dataset2) for _ in range(100): get_next0() get_next1() get_next2() JOB_NAME = "remote_device" def _get_server_def(job_name, local_server_port, remote_server_addresses, task_index): """Returns a server def with a single job + multiple tasks.""" cluster_def = cluster_pb2.ClusterDef() job_def = cluster_def.job.add() job_def.name = job_name job_def.tasks[0] = "localhost:%d" % local_server_port for i, remote_server_address in enumerate(remote_server_addresses, start=1): job_def.tasks[i] = remote_server_address server_def = tensorflow_server_pb2.ServerDef( cluster=cluster_def, job_name=job_name, task_index=task_index, protocol="grpc") return server_def # Pure eager mode test that sets up a cluster of processes. class RemoteReplicateTest(test_base.DatasetTestBase, parameterized.TestCase): def __init__(self, methodName="runTest"): # pylint: disable=invalid-name super(RemoteReplicateTest, self).__init__(methodName) self._cached_server1 = server_lib.Server.create_local_server() self._cached_server2 = server_lib.Server.create_local_server() os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1" self._cached_server1_target = self._cached_server1.target[len("grpc://"):] self._cached_server2_target = self._cached_server2.target[len("grpc://"):] self._device0 = "/job:%s/replica:0/task:0/device:CPU:0" % JOB_NAME self._device1 = "/job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME self._device2 = "/job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME def setUp(self): super(RemoteReplicateTest, self).setUp() # Start the local server. 
local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie() context.set_server_def( server_def=_get_server_def( JOB_NAME, local_server_port=local_port, remote_server_addresses=[ self._cached_server1_target, self._cached_server2_target ], task_index=0)) @combinations.generate( combinations.combine(tf_api_version=[2], mode=["eager"])) def testBasic(self): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): self.assertDatasetProduces(dataset0, range(100)) with ops.device(self._device1): self.assertDatasetProduces(dataset1, range(100)) with ops.device(self._device2): self.assertDatasetProduces(dataset2, range(100)) @combinations.generate( combinations.combine(tf_api_version=[2], mode=["eager"])) def testMap(self): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): self.assertDatasetProduces(dataset0, range(0, 200, 2)) with ops.device(self._device1): self.assertDatasetProduces(dataset1, range(0, 200, 2)) with ops.device(self._device2): self.assertDatasetProduces(dataset2, range(0, 200, 2)) @combinations.generate( combinations.combine(tf_api_version=[2], mode=["eager"])) def testVariableInput(self): with ops.device(self._device0): counter_var = variable_scope.get_variable( "counter", (), dtypes.int32, use_resource=True) dataset0 = dataset_ops.Dataset.range(100).map( lambda _: counter_var.assign_add(1)) # We don't support stateful ops in functions as of now. with self.assertRaises(errors.FailedPreconditionError): replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) self.evaluate(replicated_ds[self._device1]._variant_tensor) @combinations.generate( combinations.combine(tf_api_version=[2], mode=["eager"])) def testWhitelistStatefulOp(self): with compat.forward_compatibility_horizon(2019, 9, 12): with ops.device(self._device0): dataset0 = dataset_ops.Dataset.range(100).map( lambda _: random_ops.random_uniform( # pylint:disable=g-long-lambda [], minval=1, maxval=10, dtype=dtypes.float32)) opt = dataset_ops.Options() opt.experimental_stateful_whitelist = ["RandomUniform"] dataset0 = dataset0.with_options(opt) replicated_ds = distribute.replicate(dataset0, [self._device1, self._device2]) dataset1 = replicated_ds[self._device1] dataset2 = replicated_ds[self._device2] with ops.device(self._device0): get_next0 = self.getNext(dataset0) with ops.device(self._device1): get_next1 = self.getNext(dataset1) with ops.device(self._device2): get_next2 = self.getNext(dataset2) for _ in range(100): get_next0() get_next1() get_next2() if __name__ == "__main__": ops.enable_eager_execution( config=config_pb2.ConfigProto(device_count={"CPU": 3})) test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/replicate_test.py
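A minimal sketch of the distribute.replicate(...) usage the tests above check, assuming a runtime with at least three local CPU devices (the tests configure device_count={"CPU": 3}); the device strings below are illustrative.

# Minimal sketch, separate from the test file above; devices are assumptions.
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops

with ops.device("/device:CPU:0"):
  dataset0 = dataset_ops.Dataset.range(100)
# replicate() returns a dict mapping each target device to a replica dataset.
replicated = distribute.replicate(dataset0,
                                  ["/device:CPU:1", "/device:CPU:2"])
dataset1 = replicated["/device:CPU:1"]
dataset2 = replicated["/device:CPU:2"]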
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.{from,to}_variant()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


@test_util.run_all_in_graph_and_eager_modes
class VariantTest(test_base.DatasetTestBase):

  def testRoundtripRange(self):
    dataset = dataset_ops.Dataset.range(10)
    variant = dataset_ops.to_variant(dataset)
    dataset = dataset_ops.from_variant(variant,
                                       dataset_ops.get_structure(dataset))
    self.assertDatasetProduces(dataset, range(10))
    self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)

  def testRoundtripMap(self):
    dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x)
    variant = dataset_ops.to_variant(dataset)
    dataset = dataset_ops.from_variant(variant,
                                       dataset_ops.get_structure(dataset))
    self.assertDatasetProduces(dataset, [x * x for x in range(10)])
    self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/variant_test.py
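A minimal sketch of the to_variant / from_variant round trip the tests above verify, with cardinality used the same way to confirm the restored dataset.

# Minimal sketch, separate from the test file above.
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops

dataset = dataset_ops.Dataset.range(10)
# The structure must be captured before serializing so it can be re-attached.
structure = dataset_ops.get_structure(dataset)
variant = dataset_ops.to_variant(dataset)
restored = dataset_ops.from_variant(variant, structure)
# Cardinality is preserved through the round trip (10 for this range dataset).
card = cardinality.cardinality(restored)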
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the experimental input pipeline ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.ops import interleave_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class DirectedInterleaveDatasetTest(test_base.DatasetTestBase): def testBasic(self): selector_dataset = dataset_ops.Dataset.range(10).repeat(100) input_datasets = [ dataset_ops.Dataset.from_tensors(i).repeat(100) for i in range(10) ] dataset = interleave_ops._DirectedInterleaveDataset(selector_dataset, input_datasets) next_element = self.getNext(dataset) for _ in range(100): for i in range(10): self.assertEqual(i, self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def _normalize(self, vec): return vec / vec.sum() def _chi2(self, expected, actual): actual = np.asarray(actual) expected = np.asarray(expected) diff = actual - expected chi2 = np.sum(diff * diff / expected, axis=0) return chi2 def _testSampleFromDatasetsHelper(self, weights, num_datasets, num_samples): # Create a dataset that samples each integer in `[0, num_datasets)` # with probability given by `weights[i]`. dataset = interleave_ops.sample_from_datasets([ dataset_ops.Dataset.from_tensors(i).repeat(None) for i in range(num_datasets) ], weights) dataset = dataset.take(num_samples) next_element = self.getNext(dataset) freqs = np.zeros([num_datasets]) for _ in range(num_samples): freqs[self.evaluate(next_element())] += 1 with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) return freqs def testSampleFromDatasets(self): random_seed.set_random_seed(1619) num_samples = 5000 rand_probs = self._normalize(np.random.random_sample((15,))) # Use chi-squared test to assert that the observed distribution matches the # expected distribution. Based on the implementation in # "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py". for probs in [[.85, .05, .1], rand_probs, [1.]]: probs = np.asarray(probs) classes = len(probs) freqs = self._testSampleFromDatasetsHelper(probs, classes, num_samples) self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2) # Also check that `weights` as a dataset samples correctly. 
probs_ds = dataset_ops.Dataset.from_tensors(probs).repeat() freqs = self._testSampleFromDatasetsHelper(probs_ds, classes, num_samples) self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2) def testSelectFromDatasets(self): words = [b"foo", b"bar", b"baz"] datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words] choice_array = np.random.randint(3, size=(15,), dtype=np.int64) choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array) dataset = interleave_ops.choose_from_datasets(datasets, choice_dataset) next_element = self.getNext(dataset) for i in choice_array: self.assertEqual(words[i], self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testErrors(self): with self.assertRaisesRegexp(ValueError, r"vector of length `len\(datasets\)`"): interleave_ops.sample_from_datasets( [dataset_ops.Dataset.range(10), dataset_ops.Dataset.range(20)], weights=[0.25, 0.25, 0.25, 0.25]) with self.assertRaisesRegexp(TypeError, "`tf.float32` or `tf.float64`"): interleave_ops.sample_from_datasets( [dataset_ops.Dataset.range(10), dataset_ops.Dataset.range(20)], weights=[1, 1]) with self.assertRaisesRegexp(TypeError, "must have the same type"): interleave_ops.sample_from_datasets([ dataset_ops.Dataset.from_tensors(0), dataset_ops.Dataset.from_tensors(0.0) ]) with self.assertRaisesRegexp(TypeError, "tf.int64"): interleave_ops.choose_from_datasets([ dataset_ops.Dataset.from_tensors(0), dataset_ops.Dataset.from_tensors(1) ], choice_dataset=dataset_ops.Dataset.from_tensors(1.0)) with self.assertRaisesRegexp(TypeError, "scalar"): interleave_ops.choose_from_datasets([ dataset_ops.Dataset.from_tensors(0), dataset_ops.Dataset.from_tensors(1) ], choice_dataset=dataset_ops.Dataset.from_tensors([1.0])) with self.assertRaisesRegexp(errors.InvalidArgumentError, "out of range"): dataset = interleave_ops.choose_from_datasets( [dataset_ops.Dataset.from_tensors(0)], choice_dataset=dataset_ops.Dataset.from_tensors( constant_op.constant(1, dtype=dtypes.int64))) next_element = self.getNext(dataset) self.evaluate(next_element()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py
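A minimal usage sketch of `sample_from_datasets()` and `choose_from_datasets()`, the two transformations exercised by the test above; this is an illustrative addition assuming TensorFlow 1.15 with eager execution enabled, not part of the dataset row.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Three constant datasets; sample among them with the given weights.
datasets = [tf.data.Dataset.from_tensors(i).repeat() for i in range(3)]
sampled = tf.data.experimental.sample_from_datasets(
    datasets, weights=[0.5, 0.3, 0.2]).take(10)
print([int(x) for x in sampled])   # e.g. [0, 0, 1, 2, 0, ...] (random mix)

# Deterministic selection driven by an int64 index dataset.
choices = tf.data.Dataset.from_tensor_slices(
    tf.constant([0, 1, 2, 1], dtype=tf.int64))
chosen = tf.data.experimental.choose_from_datasets(datasets, choices)
print([int(x) for x in chosen])    # [0, 1, 2, 1]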
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.cardinality()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class NumElementsTest(test_base.DatasetTestBase, parameterized.TestCase): """Tests for `tf.data.experimental.cardinality()`.""" @parameterized.named_parameters( # pylint: disable=g-long-lambda ("Batch1", lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=True), 2), ("Batch2", lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=False), 3), ("Batch3", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).batch(2), cardinality.UNKNOWN), ("Batch4", lambda: dataset_ops.Dataset.range(5).repeat().batch(2), cardinality.INFINITE), ("Cache1", lambda: dataset_ops.Dataset.range(5).cache(), 5), ("Cache2", lambda: dataset_ops.Dataset.range(5).cache("foo"), 5), ("Concatenate1", lambda: dataset_ops.Dataset.range(5).concatenate( dataset_ops.Dataset.range(5)), 10), ("Concatenate2", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate( dataset_ops.Dataset.range(5)), cardinality.UNKNOWN), ("Concatenate3", lambda: dataset_ops.Dataset.range(5).repeat(). concatenate(dataset_ops.Dataset.range(5)), cardinality.INFINITE), ("Concatenate4", lambda: dataset_ops.Dataset.range(5).concatenate( dataset_ops.Dataset.range(5).filter(lambda _: True)), cardinality.UNKNOWN), ("Concatenate5", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate( dataset_ops.Dataset.range(5).filter(lambda _: True)), cardinality.UNKNOWN), ("Concatenate6", lambda: dataset_ops.Dataset.range(5).repeat(). 
concatenate(dataset_ops.Dataset.range(5).filter(lambda _: True)), cardinality.INFINITE), ("Concatenate7", lambda: dataset_ops.Dataset.range(5).concatenate( dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE), ("Concatenate8", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate( dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE), ("Concatenate9", lambda: dataset_ops.Dataset.range(5).repeat().concatenate( dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE), ("FlatMap", lambda: dataset_ops.Dataset.range(5).flat_map( lambda _: dataset_ops.Dataset.from_tensors(0)), cardinality.UNKNOWN), ("Filter", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True), cardinality.UNKNOWN), ("FromTensors1", lambda: dataset_ops.Dataset.from_tensors(0), 1), ("FromTensors2", lambda: dataset_ops.Dataset.from_tensors((0, 1)), 1), ("FromTensorSlices1", lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0]), 3), ("FromTensorSlices2", lambda: dataset_ops.Dataset.from_tensor_slices(([0, 0, 0], [1, 1, 1])), 3), ("Interleave1", lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1), cardinality.UNKNOWN), ("Interleave2", lambda: dataset_ops.Dataset.range(5).interleave( lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1, num_parallel_calls=1), cardinality.UNKNOWN), ("Map1", lambda: dataset_ops.Dataset.range(5).map(lambda x: x), 5), ("Map2", lambda: dataset_ops.Dataset.range(5).map( lambda x: x, num_parallel_calls=1), 5), ("PaddedBatch1", lambda: dataset_ops.Dataset.range(5).padded_batch( 2, [], drop_remainder=True), 2), ("PaddedBatch2", lambda: dataset_ops.Dataset.range(5).padded_batch( 2, [], drop_remainder=False), 3), ("PaddedBatch3", lambda: dataset_ops.Dataset.range(5).filter( lambda _: True).padded_batch(2, []), cardinality.UNKNOWN), ("PaddedBatch4", lambda: dataset_ops.Dataset.range(5).repeat().padded_batch(2, []), cardinality.INFINITE), ("Prefetch", lambda: dataset_ops.Dataset.range(5).prefetch(buffer_size=1), 5), ("Range1", lambda: dataset_ops.Dataset.range(0), 0), ("Range2", lambda: dataset_ops.Dataset.range(5), 5), ("Range3", lambda: dataset_ops.Dataset.range(5, 10), 5), ("Range4", lambda: dataset_ops.Dataset.range(10, 5), 0), ("Range5", lambda: dataset_ops.Dataset.range(5, 10, 2), 3), ("Range6", lambda: dataset_ops.Dataset.range(10, 5, -2), 3), ("Repeat1", lambda: dataset_ops.Dataset.range(0).repeat(0), 0), ("Repeat2", lambda: dataset_ops.Dataset.range(1).repeat(0), 0), ("Repeat3", lambda: dataset_ops.Dataset.range(0).repeat(5), 0), ("Repeat4", lambda: dataset_ops.Dataset.range(1).repeat(5), 5), ("Repeat5", lambda: dataset_ops.Dataset.range(0).repeat(), 0), ("Repeat6", lambda: dataset_ops.Dataset.range(1).repeat(), cardinality.INFINITE), ("Shuffle", lambda: dataset_ops.Dataset.range(5).shuffle(buffer_size=1), 5), ("Shard1", lambda: dataset_ops.Dataset.range(5).shard(2, 0), 3), ("Shard2", lambda: dataset_ops.Dataset.range(5).shard(8, 7), 0), ("Shard3", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).shard(2, 0), cardinality.UNKNOWN), ("Shard4", lambda: dataset_ops.Dataset.range(5).repeat().shard(2, 0), cardinality.INFINITE), ("Skip1", lambda: dataset_ops.Dataset.range(5).skip(2), 3), ("Skip2", lambda: dataset_ops.Dataset.range(5).skip(8), 0), ("Skip3", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).skip(2), cardinality.UNKNOWN), ("Skip4", lambda: dataset_ops.Dataset.range(5).repeat().skip(2), cardinality.INFINITE), ("Take1", lambda: 
dataset_ops.Dataset.range(5).take(2), 2), ("Take2", lambda: dataset_ops.Dataset.range(5).take(8), 5), ("Take3", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).take(2), cardinality.UNKNOWN), ("Take4", lambda: dataset_ops.Dataset.range(5).repeat().take(2), 2), ("Window1", lambda: dataset_ops.Dataset.range(5).window( size=2, shift=2, drop_remainder=True), 2), ("Window2", lambda: dataset_ops.Dataset.range(5).window( size=2, shift=2, drop_remainder=False), 3), ("Zip1", lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5)), 5), ("Zip2", lambda: dataset_ops.Dataset.zip( (dataset_ops.Dataset.range(5), dataset_ops.Dataset.range(3))), 3), ("Zip3", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range( 5), dataset_ops.Dataset.range(3).repeat())), 5), ("Zip4", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range( 5).repeat(), dataset_ops.Dataset.range(3).repeat())), cardinality.INFINITE), ("Zip5", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range( 5), dataset_ops.Dataset.range(3).filter(lambda _: True))), cardinality.UNKNOWN), # pylint: enable=g-long-lambda ) def testNumElements(self, dataset_fn, expected_result): with self.cached_session() as sess: self.assertEqual( sess.run(cardinality.cardinality(dataset_fn())), expected_result) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/cardinality_test.py
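A minimal sketch of `tf.data.experimental.cardinality()` covering the three outcomes the parameterized test above checks (known, infinite, unknown); illustrative only, assuming TensorFlow 1.15 with eager execution enabled.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

# Statically known cardinality.
print(int(tf.data.experimental.cardinality(tf.data.Dataset.range(5))))  # 5

# repeat() without a count makes the cardinality infinite.
infinite = tf.data.experimental.cardinality(tf.data.Dataset.range(5).repeat())
print(int(infinite) == tf.data.experimental.INFINITE_CARDINALITY)       # True

# filter() hides how many elements survive, so cardinality is unknown.
unknown = tf.data.experimental.cardinality(
    tf.data.Dataset.range(5).filter(lambda _: True))
print(int(unknown) == tf.data.experimental.UNKNOWN_CARDINALITY)         # True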
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.group_by_reducer()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class GroupByReducerTest(test_base.DatasetTestBase): def testSum(self): reducer = grouping.Reducer( init_func=lambda _: np.int64(0), reduce_func=lambda x, y: x + y, finalize_func=lambda x: x) for i in range(1, 11): dataset = dataset_ops.Dataset.range(2 * i).apply( grouping.group_by_reducer(lambda x: x % 2, reducer)) self.assertDatasetProduces( dataset, expected_shapes=tensor_shape.TensorShape([]), expected_output=[(i - 1) * i, i * i]) def testAverage(self): def reduce_fn(x, y): return (x[0] * x[1] + math_ops.cast(y, dtypes.float32)) / ( x[1] + 1), x[1] + 1 reducer = grouping.Reducer( init_func=lambda _: (0.0, 0.0), reduce_func=reduce_fn, finalize_func=lambda x, _: x) for i in range(1, 11): dataset = dataset_ops.Dataset.range(2 * i).apply( grouping.group_by_reducer( lambda x: math_ops.cast(x, dtypes.int64) % 2, reducer)) self.assertDatasetProduces( dataset, expected_shapes=tensor_shape.TensorShape([]), expected_output=[i - 1, i]) def testConcat(self): components = np.array(list("abcdefghijklmnopqrst")).view(np.chararray) reducer = grouping.Reducer( init_func=lambda x: "", reduce_func=lambda x, y: x + y[0], finalize_func=lambda x: x) for i in range(1, 11): dataset = dataset_ops.Dataset.zip( (dataset_ops.Dataset.from_tensor_slices(components), dataset_ops.Dataset.range(2 * i))).apply( grouping.group_by_reducer(lambda x, y: y % 2, reducer)) self.assertDatasetProduces( dataset, expected_shapes=tensor_shape.TensorShape([]), expected_output=[b"acegikmoqs"[:i], b"bdfhjlnprt"[:i]]) def testSparseSum(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=np.array([[0, 0]]), values=(i * np.array([1], dtype=np.int64)), dense_shape=np.array([1, 1])) reducer = grouping.Reducer( init_func=lambda _: _sparse(np.int64(0)), reduce_func=lambda x, y: _sparse(x.values[0] + y.values[0]), finalize_func=lambda x: x.values[0]) for i in range(1, 11): dataset = dataset_ops.Dataset.range(2 * i).map(_sparse).apply( grouping.group_by_reducer(lambda x: x.values[0] % 2, reducer)) self.assertDatasetProduces( dataset, 
expected_shapes=tensor_shape.TensorShape([]), expected_output=[(i - 1) * i, i * i]) def testChangingStateShape(self): def reduce_fn(x, _): # Statically known rank, but dynamic length. larger_dim = array_ops.concat([x[0], x[0]], 0) # Statically unknown rank. larger_rank = array_ops.expand_dims(x[1], 0) return larger_dim, larger_rank reducer = grouping.Reducer( init_func=lambda x: ([0], 1), reduce_func=reduce_fn, finalize_func=lambda x, y: (x, y)) for i in range(1, 11): dataset = dataset_ops.Dataset.from_tensors(np.int64(0)).repeat(i).apply( grouping.group_by_reducer(lambda x: x, reducer)) dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset) self.assertEqual([None], dataset_output_shapes[0].as_list()) self.assertIs(None, dataset_output_shapes[1].ndims) get_next = self.getNext(dataset) x, y = self.evaluate(get_next()) self.assertAllEqual([0] * (2**i), x) self.assertAllEqual(np.array(1, ndmin=i), y) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testTypeMismatch(self): reducer = grouping.Reducer( init_func=lambda x: constant_op.constant(1, dtype=dtypes.int32), reduce_func=lambda x, y: constant_op.constant(1, dtype=dtypes.int64), finalize_func=lambda x: x) dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegexp( TypeError, "The element types for the new state must match the initial state."): dataset.apply( grouping.group_by_reducer(lambda _: np.int64(0), reducer)) # TODO(b/78665031): Remove once non-scalar keys are supported. def testInvalidKeyShape(self): reducer = grouping.Reducer( init_func=lambda x: np.int64(0), reduce_func=lambda x, y: x + y, finalize_func=lambda x: x) dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegexp( ValueError, "`key_func` must return a single tf.int64 tensor."): dataset.apply( grouping.group_by_reducer(lambda _: np.int64((0, 0)), reducer)) # TODO(b/78665031): Remove once non-int64 keys are supported. def testInvalidKeyType(self): reducer = grouping.Reducer( init_func=lambda x: np.int64(0), reduce_func=lambda x, y: x + y, finalize_func=lambda x: x) dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegexp( ValueError, "`key_func` must return a single tf.int64 tensor."): dataset.apply( grouping.group_by_reducer(lambda _: "wrong", reducer)) def testTuple(self): def init_fn(_): return np.array([], dtype=np.int64), np.int64(0) def reduce_fn(state, value): s1, s2 = state v1, v2 = value return array_ops.concat([s1, [v1]], 0), s2 + v2 def finalize_fn(s1, s2): return s1, s2 reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn) dataset = dataset_ops.Dataset.zip( (dataset_ops.Dataset.range(10), dataset_ops.Dataset.range(10))).apply( grouping.group_by_reducer(lambda x, y: np.int64(0), reducer)) get_next = self.getNext(dataset) x, y = self.evaluate(get_next()) self.assertAllEqual(x, np.asarray([x for x in range(10)])) self.assertEqual(y, 45) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
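A minimal sketch of `tf.data.experimental.group_by_reducer()` mirroring the parity-sum pattern used in `testSum` above; illustrative only, assuming TensorFlow 1.15 with eager execution enabled.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

reducer = tf.data.experimental.Reducer(
    init_func=lambda _: tf.constant(0, dtype=tf.int64),  # per-key initial state
    reduce_func=lambda state, x: state + x,              # accumulate elements
    finalize_func=lambda state: state)                   # emit the per-key sum

dataset = tf.data.Dataset.range(10).apply(
    tf.data.experimental.group_by_reducer(lambda x: x % 2, reducer))
print(sorted(int(x) for x in dataset))  # [20, 25]: evens sum to 20, odds to 25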
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.map_and_batch()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MapAndBatchTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("Default", None, None), ("SequentialCalls", 1, None), ("ParallelCalls", 2, None), ("ParallelBatches", None, 10), ) def testMapAndBatch(self, num_parallel_calls, num_parallel_batches): """Test a dataset that maps a TF function across its input elements.""" # The pipeline is TensorSliceDataset -> # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size). components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) def dataset_fn(batch_size, count): dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat( count).apply( batching.map_and_batch( map_func=_map_fn, batch_size=batch_size, num_parallel_calls=num_parallel_calls, num_parallel_batches=num_parallel_batches)) return dataset # Batch of a finite input, where the batch_size divides the # total number of elements. dataset = dataset_fn(14, 28) get_next = self.getNext(dataset) self.assertEqual( [[None] + list(c.shape[1:]) for c in components], [shape.as_list() for shape in dataset_ops.get_legacy_output_shapes(dataset)]) num_batches = (28 * 7) // 14 for i in range(num_batches): result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range(14): self.assertAllEqual(component[(i * 14 + j) % 7]**2, result_component[j]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Batch of a finite input, where the batch_size does not # divide the total number of elements. get_next = self.getNext(dataset_fn(8, 14)) # We expect (num_batches - 1) full-sized batches. 
num_batches = int(math.ceil((14 * 7) / 8)) for i in range(num_batches - 1): result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range(8): self.assertAllEqual(component[(i * 8 + j) % 7]**2, result_component[j]) result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range((14 * 7) % 8): self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2, result_component[j]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Batch of an empty input should fail straight away. self.assertDatasetProduces(dataset_fn(8, 0), expected_output=[]) # Empty batch should be an initialization time error. with self.assertRaises(errors.InvalidArgumentError): self.assertDatasetProduces(dataset_fn(0, 14), expected_output=[]) @parameterized.named_parameters( ("Even", False), ("Uneven", True), ) def testMapAndBatchPartialBatch(self, drop_remainder): dataset = ( dataset_ops.Dataset.range(10).apply( batching.map_and_batch( lambda x: array_ops.reshape(x * x, [1]), batch_size=4, drop_remainder=drop_remainder))) if drop_remainder: self.assertEqual( [4, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list()) else: self.assertEqual( [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list()) expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]]] if not drop_remainder: expected_output.append([[64], [81]]) self.assertDatasetProduces(dataset, expected_output=expected_output) def testMapAndBatchYieldsPartialBatch(self): dataset = ( dataset_ops.Dataset.range(10).apply( batching.map_and_batch(lambda x: array_ops.reshape(x * x, [1]), 4))) self.assertEqual( [None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list()) expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]], [[64], [81]]] self.assertDatasetProduces(dataset, expected_output=expected_output) def testMapAndBatchParallelGetNext(self): dataset = dataset_ops.Dataset.range(50000).apply( batching.map_and_batch(lambda x: x, batch_size=100)) if context.executing_eagerly(): iterator = iter(dataset) get_next = iterator._next_internal # pylint: disable=protected-access else: iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next elements = [] for _ in range(100): elements.append(get_next) for i in range(5): got = self.evaluate([element() for element in elements]) got.sort(key=lambda x: x[0]) expected = [] for j in range(100): expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100)) self.assertAllEqual(got, expected) with self.assertRaises(errors.OutOfRangeError): self.evaluate([element() for element in elements]) def testMapAndBatchParallelGetNextDropRemainder(self): dataset = dataset_ops.Dataset.range(49999).apply( batching.map_and_batch( lambda x: x, batch_size=100, drop_remainder=True)) if context.executing_eagerly(): iterator = iter(dataset) get_next = iterator._next_internal # pylint: disable=protected-access else: iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next elements = [] for _ in range(100): elements.append(get_next) for i in range(4): got = self.evaluate([element() for element in elements]) got.sort(key=lambda x: x[0]) expected = [] for j in range(100): expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100)) self.assertAllEqual(got, expected) with self.assertRaises(errors.OutOfRangeError): self.evaluate([element() for element in elements]) def testMapAndBatchSparse(self): def _sparse(i): return 
sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) dataset = dataset_ops.Dataset.range(10).apply( batching.map_and_batch(_sparse, 5)) self.assertDatasetProduces( dataset, expected_output=[ sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]], values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4], dense_shape=[5, 1]) for i in range(2) ]) def testMapAndBatchFails(self): """Test a dataset that maps a TF function across its input elements.""" with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"): dataset = dataset_ops.Dataset.from_tensors( array_ops.check_numerics( constant_op.constant(1.0) / constant_op.constant(0.0), "oops")) dataset = dataset.apply(batching.map_and_batch(lambda x: x, 14)) get_next = self.getNext(dataset, requires_initialization=True) self.evaluate(get_next()) def testMapAndBatchShapeMismatch(self): """Test a dataset that maps a TF function across its input elements.""" def generator(): yield [1] yield [2] yield [3] yield [[4, 5, 6]] dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int32) batch_size = 4 dataset = dataset.apply(batching.map_and_batch(lambda x: x, batch_size)) self.assertDatasetProduces( dataset, expected_error=(errors.InvalidArgumentError, "number of elements does not match")) def testMapAndBatchImplicitDispose(self): # Tests whether a map and batch dataset will be cleaned up correctly when # the pipeline does not run it until exhaustion. # The pipeline is TensorSliceDataset -> RepeatDataset(1000) -> # MapAndBatchDataset(f=square_3, batch_size=100). components = (np.arange(1000), np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis], np.array(37.0) * np.arange(1000)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat( 1000).apply(batching.map_and_batch(_map_fn, batch_size=100)) dataset = dataset.prefetch(5) get_next = self.getNext(dataset) for _ in range(3): self.evaluate(get_next()) @parameterized.named_parameters( ("1", 0), ("2", 5), ("3", 10), ("4", 90), ("5", 95), ("6", 99), ) def testMapAndBatchMapError(self, threshold): def raising_py_fn(i): if i >= threshold: raise StopIteration() else: return i dataset = dataset_ops.Dataset.range(100).apply( batching.map_and_batch( lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64), batch_size=10)) get_next = self.getNext(dataset) for i in range(threshold // 10): self.assertAllEqual([i * 10 + j for j in range(10)], self.evaluate(get_next())) for i in range(threshold // 10, 10): with self.assertRaises(errors.InvalidArgumentError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @parameterized.named_parameters( ("1", False, dtypes.bool), ("2", -42, dtypes.int8), ("3", -42, dtypes.int16), ("4", -42, dtypes.int32), ("5", -42, dtypes.int64), ("6", 42, dtypes.uint8), ("7", 42, dtypes.uint16), ("8", 42.0, dtypes.float16), ("9", 42.0, dtypes.float32), ("10", 42.0, dtypes.float64), ("11", b"hello", dtypes.string), ) def testMapAndBatchTypes(self, element, dtype): def gen(): yield element dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply( batching.map_and_batch(lambda x: x, batch_size=10)) get_next = self.getNext(dataset) for _ in range(10): self.assertAllEqual([element for _ in range(10)], self.evaluate(get_next())) @parameterized.named_parameters( ("Identity", None, lambda x: x, None), ("Replicate", None, lambda x: (x, x), None), 
("Swap", (None, None), lambda x, y: (y, x), None), ("Project", (None, None), lambda x, y: x, None), ) def testShortCircuit(self, structure, map_fn, num_parallel_calls): dataset = self.structuredDataset(structure).repeat().apply( batching.map_and_batch(map_fn, batch_size=10)) get_next = self.getNext(dataset) if isinstance(structure, tuple): expected = map_fn( *self.evaluate(self.structuredElement(structure, shape=[10]))) else: expected = map_fn( self.evaluate(self.structuredElement(structure, shape=[10]))) self.assertAllEqual(expected, self.evaluate(get_next())) def testShortCircuitCapturedInput(self): captured_t = variables.Variable(42) dataset = self.structuredDataset(None).repeat().apply( batching.map_and_batch(lambda x: captured_t, batch_size=10)) self.evaluate(variables.global_variables_initializer()) get_next = self.getNext(dataset, requires_initialization=True) self.assertAllEqual([42] * 10, self.evaluate(get_next())) def testMapAndBatchControlFlow(self): def map_fn(x): previous_control_flow_v2_value = control_flow_util.ENABLE_CONTROL_FLOW_V2 control_flow_util.ENABLE_CONTROL_FLOW_V2 = True return_value = control_flow_ops.cond(x < 50, lambda: x + 1, lambda: x * x) control_flow_util.ENABLE_CONTROL_FLOW_V2 = previous_control_flow_v2_value return return_value dataset = dataset_ops.Dataset.range(100).apply( batching.map_and_batch(map_fn, batch_size=10)) get_next = self.getNext(dataset) for i in range(10): if i < 5: self.assertAllEqual([i * 10 + j + 1 for j in range(10)], self.evaluate(get_next())) else: self.assertAllEqual( [((i * 10) + j) * ((i * 10) + j) for j in range(10)], self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
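A minimal sketch of `tf.data.experimental.map_and_batch()`, the fused map-plus-batch transformation exercised above; illustrative only, assuming TensorFlow 1.15 with eager execution enabled.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

dataset = tf.data.Dataset.range(10).apply(
    tf.data.experimental.map_and_batch(
        map_func=lambda x: x * x,   # map...
        batch_size=4,               # ...and batch in a single fused op
        num_parallel_calls=2))
for batch in dataset:
    print(batch.numpy())  # [0 1 4 9], [16 25 36 49], [64 81] (partial final batch)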
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.rejection_resample()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import resampling from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class RejectionResampleTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("InitialDistributionKnown", True), ("InitialDistributionUnknown", False)) def testDistribution(self, initial_known): classes = np.random.randint(5, size=(20000,)) # Uniformly sampled target_dist = [0.9, 0.05, 0.05, 0.0, 0.0] initial_dist = [0.2] * 5 if initial_known else None classes = math_ops.cast(classes, dtypes.int64) # needed for Windows build. dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle( 200, seed=21, reshuffle_each_iteration=False).map( lambda c: (c, string_ops.as_string(c))).repeat() get_next = self.getNext( dataset.apply( resampling.rejection_resample( target_dist=target_dist, initial_dist=initial_dist, class_func=lambda c, _: c, seed=27))) returned = [] while len(returned) < 4000: returned.append(self.evaluate(get_next())) returned_classes, returned_classes_and_data = zip(*returned) _, returned_data = zip(*returned_classes_and_data) self.assertAllEqual([compat.as_bytes(str(c)) for c in returned_classes], returned_data) total_returned = len(returned_classes) class_counts = np.array([ len([True for v in returned_classes if v == c]) for c in range(5)]) returned_dist = class_counts / total_returned self.assertAllClose(target_dist, returned_dist, atol=1e-2) @parameterized.named_parameters( ("OnlyInitial", True), ("NotInitial", False)) def testEdgeCasesSampleFromInitialDataset(self, only_initial_dist): init_dist = [0.5, 0.5] target_dist = [0.5, 0.5] if only_initial_dist else [0.0, 1.0] num_classes = len(init_dist) # We don't need many samples to test that this works. num_samples = 100 data_np = np.random.choice(num_classes, num_samples, p=init_dist) dataset = dataset_ops.Dataset.from_tensor_slices(data_np) # Reshape distribution. 
dataset = dataset.apply( resampling.rejection_resample( class_func=lambda x: x, target_dist=target_dist, initial_dist=init_dist)) get_next = self.getNext(dataset) returned = [] with self.assertRaises(errors.OutOfRangeError): while True: returned.append(self.evaluate(get_next())) def testRandomClasses(self): init_dist = [0.25, 0.25, 0.25, 0.25] target_dist = [0.0, 0.0, 0.0, 1.0] num_classes = len(init_dist) # We don't need many samples to test a dirac-delta target distribution. num_samples = 100 data_np = np.random.choice(num_classes, num_samples, p=init_dist) dataset = dataset_ops.Dataset.from_tensor_slices(data_np) # Apply a random mapping that preserves the data distribution. def _remap_fn(_): return math_ops.cast(random_ops.random_uniform([1]) * num_classes, dtypes.int32)[0] dataset = dataset.map(_remap_fn) # Reshape distribution. dataset = dataset.apply( resampling.rejection_resample( class_func=lambda x: x, target_dist=target_dist, initial_dist=init_dist)) get_next = self.getNext(dataset) returned = [] with self.assertRaises(errors.OutOfRangeError): while True: returned.append(self.evaluate(get_next())) classes, _ = zip(*returned) bincount = np.bincount( np.array(classes), minlength=num_classes).astype(np.float32) / len(classes) self.assertAllClose(target_dist, bincount, atol=1e-2) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py
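A minimal sketch of `tf.data.experimental.rejection_resample()`; illustrative only, assuming TensorFlow 1.15 with eager execution enabled. As in the test above, the transformation yields (class, original element) pairs drawn to approximate the target class distribution.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

data = tf.data.Dataset.range(1000).map(lambda x: x % 4)  # classes 0..3, uniform
resampled = data.apply(
    tf.data.experimental.rejection_resample(
        class_func=lambda c: c,
        target_dist=[0.1, 0.1, 0.1, 0.7],
        initial_dist=[0.25, 0.25, 0.25, 0.25],
        seed=42))
classes = [int(c) for c, _ in resampled.take(200)]
print(sum(1 for c in classes if c == 3) / len(classes))  # roughly 0.7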
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.unique()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import unique from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class UniqueTest(test_base.DatasetTestBase): def _testSimpleHelper(self, dtype, test_cases): """Test the `unique()` transformation on a list of test cases. Args: dtype: The `dtype` of the elements in each test case. test_cases: A list of pairs of lists. The first component is the test input that will be passed to the transformation; the second component is the expected sequence of outputs from the transformation. """ # The `current_test_case` will be updated when we loop over `test_cases` # below; declare it here so that the generator can capture it once. current_test_case = [] dataset = dataset_ops.Dataset.from_generator(lambda: current_test_case, dtype).apply(unique.unique()) for test_case, expected in test_cases: current_test_case = test_case self.assertDatasetProduces(dataset, [ compat.as_bytes(element) if dtype == dtypes.string else element for element in expected ]) @test_util.run_deprecated_v1 def testSimpleInt(self): for dtype in [dtypes.int32, dtypes.int64]: self._testSimpleHelper(dtype, [ ([], []), ([1], [1]), ([1, 1, 1, 1, 1, 1, 1], [1]), ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 3, 2, 1, 2, 3, 4], [1, 2, 4, 3]), ([[1], [1, 1], [1, 1, 1]], [[1], [1, 1], [1, 1, 1]]), ([[1, 1], [1, 1], [2, 2], [3, 3], [1, 1]], [[1, 1], [2, 2], [3, 3]]), ]) @test_util.run_deprecated_v1 def testSimpleString(self): self._testSimpleHelper(dtypes.string, [ ([], []), (["hello"], ["hello"]), (["hello", "hello", "hello"], ["hello"]), (["hello", "world"], ["hello", "world"]), (["foo", "bar", "baz", "baz", "bar", "foo"], ["foo", "bar", "baz"]), ]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/unique_test.py
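A minimal sketch of `tf.data.experimental.unique()`, which keeps only the first occurrence of each element; illustrative only, assuming TensorFlow 1.15 with eager execution enabled.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

dataset = tf.data.Dataset.from_tensor_slices(
    tf.constant([1, 2, 2, 3, 1, 4], dtype=tf.int64)).apply(
        tf.data.experimental.unique())
print([int(x) for x in dataset])  # [1, 2, 3, 4]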
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.scan()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from tensorflow.python.data.experimental.ops import scan_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_v2_toggles from tensorflow.python.ops import math_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ScanTest(test_base.DatasetTestBase): def _counting_dataset(self, start, scan_fn): return dataset_ops.Dataset.from_tensors(0).repeat().apply( scan_ops.scan(start, scan_fn)) def testCount(self): def make_scan_fn(step): return lambda state, _: (state + step, state) def dataset_fn(start, step, take): return self._counting_dataset(start, make_scan_fn(step)).take(take) for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10), (10, 2, 10), (10, -1, 10), (10, -2, 10)]: next_element = self.getNext(dataset_fn(start_val, step_val, take_val)) for expected, _ in zip( itertools.count(start_val, step_val), range(take_val)): self.assertEqual(expected, self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testFibonacci(self): data = dataset_ops.Dataset.from_tensors(1).repeat(None).apply( scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1]))) next_element = self.getNext(data) self.assertEqual(1, self.evaluate(next_element())) self.assertEqual(1, self.evaluate(next_element())) self.assertEqual(2, self.evaluate(next_element())) self.assertEqual(3, self.evaluate(next_element())) self.assertEqual(5, self.evaluate(next_element())) self.assertEqual(8, self.evaluate(next_element())) def testSparseCount(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=np.array([[0, 0]]), values=(i * np.array([1])), dense_shape=np.array([1, 1])) def make_scan_fn(step): return lambda state, _: (_sparse(state.values[0] + step), state) def dataset_fn(start, step, take): return self._counting_dataset(_sparse(start), make_scan_fn(step)).take(take) for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10), (10, 2, 10), (10, -1, 10), (10, -2, 10)]: next_element = self.getNext(dataset_fn(start_val, step_val, take_val)) for expected, _ in zip( itertools.count(start_val, 
step_val), range(take_val)): self.assertEqual(expected, self.evaluate(next_element()).values[0]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTensorArraySimple(self): def scan_fn(ta, x): return (ta.write(ta.size(), x), ta.stack()) start = tensor_array_ops.TensorArray( size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True) start = start.write(0, -1) ds = dataset_ops.Dataset.range(5).apply(scan_ops.scan(start, scan_fn)) self.assertDatasetProduces( ds, expected_output=[ [-1], [-1, 0], [-1, 0, 1], [-1, 0, 1, 2], [-1, 0, 1, 2, 3], ], requires_initialization=True, num_test_iterations=2) def testTensorArrayWithCondReset(self): def empty(): return tensor_array_ops.TensorArray( size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True) def scan_fn(ta, x): updated = ta.write(ta.size(), x) next_iter = control_flow_ops.cond( math_ops.equal(x % 3, 0), empty, lambda: updated) return (next_iter, updated.stack()) start = empty() start = start.write(0, -1) ds = dataset_ops.Dataset.range(6).apply(scan_ops.scan(start, scan_fn)) self.assertDatasetProduces( ds, expected_output=[ [-1, 0], [1], [1, 2], [1, 2, 3], [4], [4, 5], ], requires_initialization=True, num_test_iterations=2) def testTensorArrayWithCondResetByExternalCaptureBreaks(self): if control_flow_v2_toggles.control_flow_v2_enabled(): self.skipTest("v1 only test") empty_ta = tensor_array_ops.TensorArray( size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True) def scan_fn(ta, x): updated = ta.write(ta.size(), x) # Here, capture empty_ta from outside the function. However, it may be # either a TF1-style TensorArray or an Eager-style TensorArray. next_iter = control_flow_ops.cond( math_ops.equal(x % 3, 0), lambda: empty_ta, lambda: updated) return (next_iter, updated.stack()) start = empty_ta start = start.write(0, -1) with self.assertRaisesRegexp( NotImplementedError, r"construct a new TensorArray inside the function"): dataset_ops.Dataset.range(6).apply(scan_ops.scan(start, scan_fn)) def testChangingStateShape(self): # Test the fixed-point shape invariant calculations: start with # initial values with known shapes, and use a scan function that # changes the size of the state on each element. def _scan_fn(state, input_value): # Statically known rank, but dynamic length. ret_longer_vector = array_ops.concat([state[0], state[0]], 0) # Statically unknown rank. 
ret_larger_rank = array_ops.expand_dims(state[1], 0) return (ret_longer_vector, ret_larger_rank), (state, input_value) dataset = dataset_ops.Dataset.from_tensors(0).repeat(5).apply( scan_ops.scan(([0], 1), _scan_fn)) self.assertEqual( [None], dataset_ops.get_legacy_output_shapes(dataset)[0][0].as_list()) self.assertIs( None, dataset_ops.get_legacy_output_shapes(dataset)[0][1].ndims) self.assertEqual( [], dataset_ops.get_legacy_output_shapes(dataset)[1].as_list()) next_element = self.getNext(dataset) for i in range(5): (longer_vector_val, larger_rank_val), _ = self.evaluate(next_element()) self.assertAllEqual([0] * (2**i), longer_vector_val) self.assertAllEqual(np.array(1, ndmin=i), larger_rank_val) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testIncorrectStateType(self): def _scan_fn(state, _): return constant_op.constant(1, dtype=dtypes.int64), state dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegexp( TypeError, "The element types for the new state must match the initial state."): dataset.apply( scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn)) def testIncorrectReturnType(self): def _scan_fn(unused_state, unused_input_value): return constant_op.constant(1, dtype=dtypes.int64) dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegexp( TypeError, "The scan function must return a pair comprising the new state and the " "output value."): dataset.apply( scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn)) def testPreserveCardinality(self): def scan_fn(state, val): def py_fn(_): raise StopIteration() return state, script_ops.py_func(py_fn, [val], dtypes.int64) dataset = dataset_ops.Dataset.from_tensors(0).apply( scan_ops.scan(constant_op.constant(1), scan_fn)) get_next = self.getNext(dataset) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/scan_test.py
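A minimal sketch of `tf.data.experimental.scan()`, threading state through a dataset exactly as the Fibonacci case in the test above does; illustrative only, assuming TensorFlow 1.15 with eager execution enabled.

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

fib = tf.data.Dataset.from_tensors(1).repeat().apply(
    tf.data.experimental.scan(
        [0, 1],                                                  # initial state (a, b)
        lambda state, _: ([state[1], state[0] + state[1]],       # new state
                          state[1])))                            # emitted value
print([int(x) for x in fib.take(6)])  # [1, 1, 2, 3, 5, 8]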
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the experimental input pipeline statistics gathering ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import stats_aggregator from tensorflow.python.data.experimental.ops import stats_ops from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class StatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase): def testBytesProduced(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply( stats_ops.bytes_produced_stats("bytes_produced")) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) expected_sum = 0.0 for i in range(100): self.assertAllEqual( np.array([i] * i, dtype=np.int64), self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "bytes_produced", float(i + 1), i + 2) expected_sum += i * 8.0 self.assertStatisticsHasSum(handle, "bytes_produced", expected_sum, i + 2) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "bytes_produced", 100.0, 101) self.assertStatisticsHasSum(handle, "bytes_produced", expected_sum, 101) def testLatencyStats(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", float(i + 1), i + 2) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", 100.0, 101) def testPrefetchBufferUtilization(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) 
for i in range(100): self.assertAllEqual( np.array([i] * i, dtype=np.int64), self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, self.regexForNodeName("PrefetchDataset", "buffer_utilization"), float(i + 1), 3 * i + 4, offset=2) self.assertStatisticsContains( handle, self.regexForNodeName("PrefetchDataset", "buffer_capacity"), 3 * i + 4) self.assertStatisticsContains( handle, self.regexForNodeName("PrefetchDataset", "buffer_size"), 3 * i + 4, offset=1) self.assertStatisticsHasRange( handle, self.regexForNodeName("PrefetchDataset", "buffer_utilization"), 0, 1, 3 * i + 4, offset=2) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, self.regexForNodeName("PrefetchDataset", "buffer_utilization"), 100, 301, offset=2) def testPrefetchBufferScalars(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(10).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(1) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(10): self.assertAllEqual( np.array([i] * i, dtype=np.int64), self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("PrefetchDataset", "buffer_capacity"), 1, 3 * i + 4) self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("PrefetchDataset", "buffer_size"), 1, 3 * i + 4, offset=1) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testFilteredElementsStats(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(101).filter( lambda x: math_ops.equal(math_ops.mod(x, 3), 0)) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(34): self.assertEqual(i * 3, self.evaluate(next_element())) handle = self.getHandle(aggregator) if i != 0: self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("FilterDataset", "dropped_elements"), float(i * 2)) self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("FilterDataset", "filtered_elements"), float(i + 1)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("FilterDataset", "dropped_elements"), 67.0) self.assertStatisticsHasScalarValue( handle, self.regexForNodeName("FilterDataset", "filtered_elements"), 34.0) def testReinitialize(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) dataset = self.datasetExperimentalStats(dataset, aggregator) for j in range(5): next_element = self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", float((j * 100) + i + 1), (j * 100) + i + 2) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", (j + 1) * 100.0, (j * 100) + 101) def testNoAggregatorRegistered(self): dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) next_element = 
self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testMultipleTags(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")).apply( stats_ops.latency_stats("record_latency_2")) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, "record_latency", float(i + 1), 2 * i + 3, offset=1) self.assertStatisticsHasCount(handle, "record_latency_2", float(i + 1), 2 * i + 3) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, "record_latency", 100.0, 201, offset=1) self.assertStatisticsHasCount(handle, "record_latency_2", 100.0, 201) def testRepeatedTags(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")).apply( stats_ops.latency_stats("record_latency")) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i, self.evaluate(next_element())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", float(2 * (i + 1)), 2 * i + 3) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", 200.0, 201) def testMultipleIteratorsSameAggregator(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element1 = self.getNext(dataset, requires_initialization=True) next_element2 = self.getNext(dataset, requires_initialization=True) for i in range(100): self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", float(2 * (i + 1)), 2 * i + 3) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element1()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element2()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount(handle, "record_latency", 200.0, 201) def testMultipleDatasetWithPrefixes(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) dataset = self.datasetExperimentalStats( dataset, aggregator, prefix="dataset1") dataset2 = dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats("record_latency")) dataset2 = self.datasetExperimentalStats( dataset2, aggregator, prefix="dataset2") next_element1 = self.getNext(dataset, requires_initialization=True) next_element2 = self.getNext(dataset2, requires_initialization=True) for i in range(100): self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2())) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, "dataset1::record_latency", float(i + 1), 2 * i + 3, offset=1) self.assertStatisticsHasCount(handle, "dataset2::record_latency", 
float(i + 1), 2 * i + 3) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element1()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element2()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, "dataset1::record_latency", 100.0, 201, offset=1) self.assertStatisticsHasCount(handle, "dataset2::record_latency", 100.0, 201) def testMultiplePrefetchStats(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.range(10).prefetch( 2).filter(lambda x: math_ops.equal(math_ops.mod(x, 2), 0)).prefetch(1) dataset = self.datasetExperimentalStats(dataset, aggregator) next_element = self.getNext(dataset, requires_initialization=True) for i in range(5): self.assertEqual(i * 2, self.evaluate(next_element())) handle = self.getHandle(aggregator) # TODO(shivaniagarwal): using exact name of prefetch node than the regex, # to differentiate between two prefetch. This might break in future, at # which point, it would be best to disable this test. self.assertStatisticsHasScalarValue( handle, "PrefetchDataset/_5::buffer_capacity", 2) self.assertStatisticsContains(handle, "PrefetchDataset/_5::buffer_size") self.assertStatisticsHasScalarValue( handle, "PrefetchDataset/_8::buffer_capacity", 1) self.assertStatisticsContains(handle, "PrefetchDataset/_8::buffer_size") with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) class ThreadUtilizationStatsTest(stats_dataset_test_base.StatsDatasetTestBase): def testMapBufferUtilization(self): def dataset_fn(): return dataset_ops.Dataset.range(10).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x])), num_parallel_calls=4) self.parallelCallsStats( dataset_fn, {"ParallelMapDataset"}, 10, function_processing_time=True) def testMapAutoTuneBufferUtilization(self): def dataset_fn(): return dataset_ops.Dataset.range(10).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x])), num_parallel_calls=dataset_ops.AUTOTUNE) self.parallelCallsStats( dataset_fn, {"ParallelMapDataset"}, 10, function_processing_time=True) def testInterleaveAutoTuneBufferUtilization(self): def dataset_fn(): def interleave_fn(_): return dataset_ops.Dataset.range( 10).map(lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))) return dataset_ops.Dataset.range(1).interleave( interleave_fn, cycle_length=1, num_parallel_calls=dataset_ops.AUTOTUNE) self.parallelCallsStats(dataset_fn, {"ParallelInterleaveDatasetV2"}, 10) def testMapAndBatchAutoTuneBufferUtilization(self): def dataset_fn(): return dataset_ops.Dataset.range(100).apply( batching.map_and_batch( lambda x: array_ops.tile([x], ops.convert_to_tensor([2])), num_parallel_calls=dataset_ops.AUTOTUNE, batch_size=16)) num_output = 100 // 16 + 1 self.parallelCallsStats( dataset_fn, {"MapAndBatchDataset"}, num_output, check_elements=False, function_processing_time=True) class FeatureStatsDatasetTest( stats_dataset_test_base.StatsDatasetTestBase, reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase): def testFeaturesStats(self): num_epochs = 5 total_records = num_epochs * self._num_records batch_size = 2 def dataset_fn(): return self.make_batch_feature( filenames=self.test_filenames[0], num_epochs=num_epochs, batch_size=batch_size, shuffle=True, shuffle_seed=5, drop_final_batch=False) num_output = total_records // batch_size if total_records % batch_size: num_output = total_records // batch_size + 1 self.parallelCallsStats( dataset_fn, {"ParseExampleDataset"}, num_output, check_elements=False) aggregator = 
stats_aggregator.StatsAggregator() dataset = self.datasetExperimentalStats( dataset_fn(), aggregator, prefix="record_stats") next_element = self.getNext(dataset, requires_initialization=True) for _ in range(num_output): self.evaluate(next_element()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, self.regexForNodeName("record_stats::ParseExampleDataset", "features_count"), total_records) self.assertStatisticsHasCount( handle, self.regexForNodeName("record_stats::ParseExampleDataset", "feature_values_count"), total_records) self.assertStatisticsHasSum( handle, self.regexForNodeName("record_stats::ParseExampleDataset", "features_count"), total_records * 4) self.assertStatisticsHasSum( handle, self.regexForNodeName("record_stats::ParseExampleDataset", "feature_values_count"), self._sum_keywords(1) * num_epochs + 3 * total_records) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py
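A minimal sketch of the public stats API the file above exercises: wiring a StatsAggregator into a tf.data pipeline via tf.data.Options on TF 1.15. The pipeline and the loop below are illustrative only, not taken from the test.

import tensorflow as tf

aggregator = tf.data.experimental.StatsAggregator()

# Record per-element latency under the tag "record_latency".
dataset = tf.data.Dataset.range(100).apply(
    tf.data.experimental.latency_stats("record_latency")).prefetch(2)

options = tf.data.Options()
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)

# In graph mode the aggregated statistics are exposed as a summary op.
summary_op = aggregator.get_summary()
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
next_element = iterator.get_next()

with tf.compat.v1.Session() as sess:
    sess.run(iterator.initializer)
    while True:
        try:
            sess.run(next_element)
        except tf.errors.OutOfRangeError:
            break
    summary = sess.run(summary_op)  # serialized Summary with the latency histogram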
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the experimental input pipeline ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import get_single_element from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import function from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class GetSingleElementTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("Zero", 0, 1), ("Five", 5, 1), ("Ten", 10, 1), ("Empty", 100, 1, errors.InvalidArgumentError, "Dataset was empty."), ("MoreThanOne", 0, 2, errors.InvalidArgumentError, "Dataset had more than one element."), ) def testGetSingleElement(self, skip, take, error=None, error_msg=None): def make_sparse(x): x_1d = array_ops.reshape(x, [1]) x_2d = array_ops.reshape(x, [1, 1]) return sparse_tensor.SparseTensor(x_2d, x_1d, x_1d) dataset = dataset_ops.Dataset.range(100).skip( skip).map(lambda x: (x * x, make_sparse(x))).take(take) if error is None: dense_val, sparse_val = self.evaluate( get_single_element.get_single_element(dataset)) self.assertEqual(skip * skip, dense_val) self.assertAllEqual([[skip]], sparse_val.indices) self.assertAllEqual([skip], sparse_val.values) self.assertAllEqual([skip], sparse_val.dense_shape) else: with self.assertRaisesRegexp(error, error_msg): self.evaluate(get_single_element.get_single_element(dataset)) def testWindow(self): """Test that `get_single_element()` can consume a nested dataset.""" def flat_map_func(ds): batched = ds.batch(2) element = get_single_element.get_single_element(batched) return dataset_ops.Dataset.from_tensors(element) dataset = dataset_ops.Dataset.range(10).window(2).flat_map(flat_map_func) self.assertDatasetProduces( dataset, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]) def testSideEffect(self): counter_var = variables.Variable(0) def increment_fn(x): counter_var.assign_add(1) return x def dataset_fn(): return dataset_ops.Dataset.range(1).map(increment_fn) @function.defun def fn(): _ = get_single_element.get_single_element(dataset_fn()) return "hello" self.evaluate(counter_var.initializer) self.assertEqual(self.evaluate(fn()), b"hello") self.assertEqual(self.evaluate(counter_var), 1) def testAutomaticControlDependencies(self): counter_var = variables.Variable(1) def increment_fn(x): counter_var.assign(counter_var + 1) return x def multiply_fn(x): counter_var.assign(counter_var * 2) return x def dataset1_fn(): return 
dataset_ops.Dataset.range(1).map(increment_fn) def dataset2_fn(): return dataset_ops.Dataset.range(1).map(multiply_fn) @function.defun def fn(): _ = get_single_element.get_single_element(dataset1_fn()) _ = get_single_element.get_single_element(dataset2_fn()) return "hello" self.evaluate(counter_var.initializer) self.assertEqual(self.evaluate(fn()), b"hello") self.assertEqual(self.evaluate(counter_var), 4) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py
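A minimal sketch of the API tested above: get_single_element extracts the only element of a dataset as plain tensors, assuming TF 1.15 graph mode.

import tensorflow as tf

dataset = tf.data.Dataset.range(10).batch(10)  # exactly one element: [0..9]
batch = tf.data.experimental.get_single_element(dataset)

with tf.compat.v1.Session() as sess:
    print(sess.run(batch))  # [0 1 2 3 4 5 6 7 8 9]
# An InvalidArgumentError is raised if the dataset is empty or yields more than
# one element, which is what the parameterized cases above assert.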
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.TFRecordWriter`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.experimental.ops import writers from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.eager import function from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.lib.io import python_io from tensorflow.python.lib.io import tf_record from tensorflow.python.ops import string_ops from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class TFRecordWriterTest(test_base.DatasetTestBase): def setUp(self): super(TFRecordWriterTest, self).setUp() self._num_records = 8 def writer_fn(self, filename, compression_type=""): input_dataset = readers.TFRecordDataset([filename], compression_type) return writers.TFRecordWriter(self._outputFilename(), compression_type).write(input_dataset) def _record(self, i): return compat.as_bytes("Record %d" % (i)) def _createFile(self, options=None): filename = self._inputFilename() writer = python_io.TFRecordWriter(filename, options) for i in range(self._num_records): writer.write(self._record(i)) writer.close() return filename def _inputFilename(self): return os.path.join(self.get_temp_dir(), "tf_record.in.txt") def _outputFilename(self): return os.path.join(self.get_temp_dir(), "tf_record.out.txt") def testWrite(self): self.evaluate(self.writer_fn(self._createFile())) for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())): self.assertAllEqual(self._record(i), r) def testWriteZLIB(self): options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB) self.evaluate( self.writer_fn(self._createFile(options), compression_type="ZLIB")) for i, r in enumerate( tf_record.tf_record_iterator(self._outputFilename(), options=options)): self.assertAllEqual(self._record(i), r) def testWriteGZIP(self): options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP) self.evaluate( self.writer_fn(self._createFile(options), compression_type="GZIP")) for i, r in enumerate( tf_record.tf_record_iterator(self._outputFilename(), options=options)): self.assertAllEqual(self._record(i), r) def testFailDataset(self): with self.assertRaises(TypeError): writers.TFRecordWriter(self._outputFilename(), "").write("whoops") def testFailDType(self): input_dataset = dataset_ops.Dataset.from_tensors(10) with self.assertRaises(TypeError): writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset) def testFailShape(self): input_dataset = 
dataset_ops.Dataset.from_tensors([["hello"], ["world"]]) with self.assertRaises(TypeError): writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset) def testSideEffect(self): def writer_fn(): input_dataset = readers.TFRecordDataset(self._createFile()) return writers.TFRecordWriter(self._outputFilename()).write(input_dataset) @function.defun def fn(): _ = writer_fn() return "hello" self.assertEqual(self.evaluate(fn()), b"hello") for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())): self.assertAllEqual(self._record(i), r) def testShard(self): filename = self._createFile() dataset = readers.TFRecordDataset([filename]) def reduce_func(key, dataset): shard_filename = string_ops.string_join( [filename, string_ops.as_string(key)]) writer = writers.TFRecordWriter(shard_filename) writer.write(dataset.map(lambda _, x: x)) return dataset_ops.Dataset.from_tensors(shard_filename) dataset = dataset.enumerate() dataset = dataset.apply( grouping.group_by_window(lambda i, _: i % 2, reduce_func, dtypes.int64.max)) get_next = self.getNext(dataset) for i in range(2): shard_filename = (filename + str(i)).encode() self.assertEqual(self.evaluate(get_next()), shard_filename) for j, r in enumerate(tf_record.tf_record_iterator(shard_filename)): self.assertAllEqual(self._record(i + 2*j), r) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/tf_record_writer_test.py
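A minimal sketch of tf.data.experimental.TFRecordWriter as exercised above (TF 1.15); the output path is hypothetical. The input dataset must yield scalar tf.string records.

import tensorflow as tf

records = tf.data.Dataset.from_tensor_slices([b"Record 0", b"Record 1", b"Record 2"])
write_op = tf.data.experimental.TFRecordWriter(
    "/tmp/example.tfrecord", compression_type="GZIP").write(records)

with tf.compat.v1.Session() as sess:
    sess.run(write_op)

# Read the file back with a matching compression type.
read_back = tf.data.TFRecordDataset("/tmp/example.tfrecord", compression_type="GZIP")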
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.parallel_interleave()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import threading import time import numpy as np from six.moves import zip_longest from tensorflow.python.data.experimental.ops import interleave_ops from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ParallelInterleaveTest(test_base.DatasetTestBase): def setUp(self): self.error = None self.repeat_count = 2 # Set up threading events used to sequence when items are produced that # are subsequently interleaved. These events allow us to deterministically # simulate slowdowns and force sloppiness. self.read_coordination_events = {} self.write_coordination_events = {} # input values [4, 5, 6] are the common case for the tests; set defaults for i in range(4, 7): self.read_coordination_events[i] = threading.Semaphore(0) self.write_coordination_events[i] = threading.Event() def dataset_fn(self, input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): def map_py_fn(x): self.write_coordination_events[x].wait() self.write_coordination_events[x].clear() self.read_coordination_events[x].release() if self.error: err = self.error self.error = None raise err # pylint: disable=raising-bad-type return x * x def map_fn(x): return script_ops.py_func(map_py_fn, [x], x.dtype) def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(x) return dataset.map(map_fn) return dataset_ops.Dataset.from_tensor_slices(input_values).repeat( self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) def _interleave(self, lists, cycle_length, block_length): """Python implementation of interleave used for testing.""" num_open = 0 # `all_iterators` acts as a queue of iterators over each element of `lists`. all_iterators = [iter(l) for l in lists] # `open_iterators` are the iterators whose elements are currently being # interleaved. 
open_iterators = [] for i in range(cycle_length): if all_iterators: open_iterators.append(all_iterators.pop(0)) num_open += 1 else: open_iterators.append(None) while num_open or all_iterators: for i in range(cycle_length): if open_iterators[i] is None: if all_iterators: open_iterators[i] = all_iterators.pop(0) num_open += 1 else: continue for _ in range(block_length): try: yield next(open_iterators[i]) except StopIteration: open_iterators[i] = None num_open -= 1 break def testPythonImplementation(self): input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]] # Cycle length 1 acts like `Dataset.flat_map()`. expected_elements = itertools.chain(*input_lists) for expected, produced in zip(expected_elements, self._interleave(input_lists, 1, 1)): self.assertEqual(expected, produced) # Cycle length > 1. expected_elements = [ 4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 1))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def testPythonImplementationBlockLength(self): input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2 expected_elements = [ 4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 2))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def testPythonImplementationEmptyLists(self): input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6]] expected_elements = [ 4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6 ] for index, (expected, produced) in enumerate( zip_longest(expected_elements, self._interleave(input_lists, 2, 1))): self.assertEqual(expected, produced, "Values differ at %s. %s != %s" % (index, expected, produced)) def _clear_coordination_events(self): for i in range(4, 7): self.read_coordination_events[i] = threading.Semaphore(0) self.write_coordination_events[i].clear() def _allow_all_map_threads(self): for i in range(4, 7): self.write_coordination_events[i].set() def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0): # cycle_length=1,block_length=1 acts like `Dataset.interleave()` and # `Dataset.flat_map()` and is single-threaded. No synchronization required. self.skipTest("b/131722904") self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=1, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=prefetch_input_elements)) for expected_element in self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1): self.write_coordination_events[expected_element].set() self.assertEqual(expected_element * expected_element, self.evaluate(next_element())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testSingleThreaded(self): self._testSingleThreaded() def testSingleThreadedSloppy(self): self._testSingleThreaded(sloppy=True) def testSingleThreadedPrefetch1Itr(self): self._testSingleThreaded(prefetch_input_elements=1) def testSingleThreadedPrefetch1ItrSloppy(self): self._testSingleThreaded(prefetch_input_elements=1, sloppy=True) def testSingleThreadedRagged(self): # Tests a sequence with wildly different elements per iterator. 
self.skipTest("b/131722904") self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([3, 7, 4]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=1)) # Add coordination values for 3 and 7 self.read_coordination_events[3] = threading.Semaphore(0) self.write_coordination_events[3] = threading.Event() self.read_coordination_events[7] = threading.Semaphore(0) self.write_coordination_events[7] = threading.Event() for expected_element in self._interleave( [[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1): self.write_coordination_events[expected_element].set() output = self.evaluate(next_element()) self.assertEqual(expected_element * expected_element, output) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def _testTwoThreadsNoContention(self, sloppy=False): # num_threads > 1. # Explicit coordination should result in `Dataset.interleave()` behavior self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: self.read_coordination_events[expected_element].acquire() done_first_event = True self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContention(self): self._testTwoThreadsNoContention() def testTwoThreadsNoContentionSloppy(self): self._testTwoThreadsNoContention(sloppy=True) def _testTwoThreadsNoContentionWithRaces(self, sloppy=False): """Tests where all the workers race in producing elements. Note: this is in contrast with the previous test which carefully sequences the execution of the map functions. Args: sloppy: Whether to be sloppy or not. """ self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): if done_first_event: # First event starts the worker threads. self._allow_all_map_threads() self.read_coordination_events[expected_element].acquire() else: self.write_coordination_events[expected_element].set() time.sleep(0.5) # Sleep to consistently "avoid" the race condition. 
actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.assertTrue( self.read_coordination_events[expected_element].acquire(False)) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionWithRaces(self): self._testTwoThreadsNoContentionWithRaces() def testTwoThreadsNoContentionWithRacesSloppy(self): self._testTwoThreadsNoContentionWithRaces(sloppy=True) def _testTwoThreadsNoContentionBlockLength(self, sloppy=False): # num_threads > 1. # Explicit coordination should result in `Dataset.interleave()` behavior self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 2)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.read_coordination_events[expected_element].acquire() self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionBlockLength(self): self._testTwoThreadsNoContentionBlockLength() def testTwoThreadsNoContentionBlockLengthSloppy(self): self._testTwoThreadsNoContentionBlockLength(sloppy=True) def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False): """Tests where all the workers race in producing elements. Note: this is in contrast with the previous test which carefully sequences the execution of the map functions. Args: sloppy: Whether to be sloppy or not. """ self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=1)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 2)): if done_first_event: # First event starts the worker threads. self._allow_all_map_threads() self.read_coordination_events[expected_element].acquire() else: self.write_coordination_events[expected_element].set() time.sleep(0.5) # Sleep to consistently "avoid" the race condition. actual_element = self.evaluate(next_element()) if not done_first_event: done_first_event = True self.assertTrue( self.read_coordination_events[expected_element].acquire(False)) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testTwoThreadsNoContentionWithRacesAndBlocking(self): self._testTwoThreadsNoContentionWithRacesAndBlocking() def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self): self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True) def _testEmptyInput(self, sloppy=False): # Empty input. 
self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([]), cycle_length=2, block_length=3, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testEmptyInput(self): self._testEmptyInput() def testEmptyInputSloppy(self): self._testEmptyInput(sloppy=True) def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False): # Non-empty input leading to empty output. self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([0, 0, 0]), cycle_length=2, block_length=3, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testNonEmptyInputIntoEmptyOutputs(self): self._testNonEmptyInputIntoEmptyOutputs() def testNonEmptyInputIntoEmptyOutputsSloppy(self): self._testNonEmptyInputIntoEmptyOutputs(sloppy=True) def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1): race_indices = {2, 8, 14} # Sequence points when sloppy mode has race conds # Mixture of non-empty and empty interleaved datasets. self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 0, 6]), cycle_length=2, block_length=1, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=prefetch_input_elements)) for i, expected_element in enumerate( self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)): self.write_coordination_events[expected_element].set() # First event starts the worker threads. Additionally, when running the # sloppy case with prefetch_input_elements=0, we get stuck if we wait # for the read coordination event for certain event orderings in the # presence of finishing iterators. if done_first_event and not (sloppy and (i in race_indices)): self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event or (sloppy and (i in race_indices)): done_first_event = True self.read_coordination_events[expected_element].acquire() self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) def testPartiallyEmptyOutputs(self): self._testPartiallyEmptyOutputs() def testPartiallyEmptyOutputsSloppy(self): self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0) def testDelayedOutputSloppy(self): # Explicitly control the sequence of events to ensure we correctly avoid # head-of-line blocking. 
self.skipTest("b/131722904") self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=True, buffer_output_elements=1, prefetch_input_elements=0)) mis_ordering = [ 4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6, 6, 5, 5, 5, 5, 6, 6 ] for element in mis_ordering: self.write_coordination_events[element].set() self.assertEqual(element * element, self.evaluate(next_element())) self.assertTrue(self.read_coordination_events[element].acquire(False)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testBlockLengthWithContentionSloppy(self): self.skipTest("b/131722904") self._clear_coordination_events() done_first_event = False next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=True, buffer_output_elements=1, prefetch_input_elements=1)) # Test against a generating sequence that differs from the uncontended # case, in order to prove sloppy correctness. for i, expected_element in enumerate( self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, cycle_length=2, block_length=3)): self.write_coordination_events[expected_element].set() if done_first_event: # First event starts the worker threads. self.read_coordination_events[expected_element].acquire() actual_element = self.evaluate(next_element()) if not done_first_event: self.read_coordination_events[expected_element].acquire() done_first_event = True self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def _testEarlyExit(self, sloppy=False): # Exiting without consuming all input should not block self.skipTest("b/131722904") self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=3, block_length=2, sloppy=sloppy, buffer_output_elements=1, prefetch_input_elements=0)) for i in range(4, 7): self.write_coordination_events[i].set() elem = self.evaluate(next_element()) # Start all workers # Allow the one successful worker to progress beyond the py_func again. 
elem = int(math.sqrt(elem)) self.write_coordination_events[elem].set() self.read_coordination_events[elem].acquire() # Allow the prefetch to succeed for i in range(4, 7): self.read_coordination_events[i].acquire() self.write_coordination_events[i].set() def testEarlyExit(self): self._testEarlyExit() def testEarlyExitSloppy(self): self._testEarlyExit(sloppy=True) def _testTooManyReaders(self, sloppy=False): def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64)) return dataset dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6]) dataset = dataset.repeat(self.repeat_count) dataset = dataset.apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy)) get_next = self.getNext(dataset) output_values = [] for _ in range(30): output_values.append(self.evaluate(get_next())) expected_values = self._interleave( [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2) self.assertItemsEqual(output_values, expected_values) def testTooManyReaders(self): self._testTooManyReaders() def testTooManyReadersSloppy(self): self._testTooManyReaders(sloppy=True) def testSparse(self): def _map_fn(i): return sparse_tensor.SparseTensor( indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2]) def _interleave_fn(x): return dataset_ops.Dataset.from_tensor_slices( sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values)) dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply( interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1)) get_next = self.getNext(dataset) for i in range(10): for j in range(2): expected = [i, 0] if j % 2 == 0 else [0, -i] self.assertAllEqual(expected, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testErrorsInOutputFn(self): self.skipTest("b/131722904") self._clear_coordination_events() next_element = self.getNext( self.dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) except_on_element_indices = set([3]) for i, expected_element in enumerate( self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2, 1)): if i in except_on_element_indices: self.error = ValueError() self.write_coordination_events[expected_element].set() with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: self.write_coordination_events[expected_element].set() actual_element = self.evaluate(next_element()) self.assertEqual( expected_element * expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testErrorsInInputFn(self): def map_py_fn(x): if x == 5: raise ValueError() return x def map_fn(x): return script_ops.py_func(map_py_fn, [x], x.dtype) def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) dataset = dataset.repeat(x) return dataset def dataset_fn(input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): return dataset_ops.Dataset.from_tensor_slices(input_values).map( map_fn).repeat(self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) next_element = self.getNext( dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, 
buffer_output_elements=1, prefetch_input_elements=0)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)): if expected_element == 5: with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: actual_element = self.evaluate(next_element()) self.assertEqual( expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testErrorsInInterleaveFn(self): def map_py_fn(x): if x == 5: raise ValueError() return x def interleave_fn(x): dataset = dataset_ops.Dataset.from_tensors(x) y = script_ops.py_func(map_py_fn, [x], x.dtype) dataset = dataset.repeat(y) return dataset def dataset_fn(input_values, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): return dataset_ops.Dataset.from_tensor_slices(input_values).repeat( self.repeat_count).apply( interleave_ops.parallel_interleave( interleave_fn, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements)) next_element = self.getNext( dataset_fn( input_values=np.int64([4, 5, 6]), cycle_length=2, block_length=1, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) for i, expected_element in enumerate( self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)): if expected_element == 5: with self.assertRaises(errors.InvalidArgumentError): self.evaluate(next_element()) else: actual_element = self.evaluate(next_element()) self.assertEqual( expected_element, actual_element, "At index %s: %s expected, got: %s" % (i, expected_element, actual_element)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(next_element()) def testShutdownRace(self): dataset = dataset_ops.Dataset.range(20) map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1)) dataset = dataset.apply( interleave_ops.parallel_interleave( map_fn, cycle_length=3, sloppy=False, buffer_output_elements=1, prefetch_input_elements=0)) dataset = dataset.batch(32) results = [] for _ in range(2): elements = [] next_element = self.getNext(dataset) try: while True: elements.extend(self.evaluate(next_element())) except errors.OutOfRangeError: pass results.append(elements) self.assertAllEqual(results[0], results[1]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py
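A minimal sketch of the parallel_interleave transformation tested above, on TF 1.15; the shard filenames are hypothetical.

import tensorflow as tf

filenames = tf.data.Dataset.from_tensor_slices(
    ["/tmp/shard-0.tfrecord", "/tmp/shard-1.tfrecord"])
dataset = filenames.apply(
    tf.data.experimental.parallel_interleave(
        lambda f: tf.data.TFRecordDataset(f),
        cycle_length=2,
        block_length=1,
        sloppy=True))  # sloppy=True trades deterministic order for throughput

On TF 1.15 the non-deprecated equivalent is Dataset.interleave with num_parallel_calls=tf.data.experimental.AUTOTUNE.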
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.make_tf_record_dataset()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.ops import readers from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import string_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MakeTFRecordDatasetTest( reader_dataset_ops_test_base.TFRecordDatasetTestBase): def _read_test(self, batch_size, num_epochs, file_index=None, num_parallel_reads=1, drop_final_batch=False, parser_fn=False): if file_index is None: file_pattern = self.test_filenames else: file_pattern = self.test_filenames[file_index] if parser_fn: fn = lambda x: string_ops.substr(x, 1, 999) else: fn = None outputs = self.getNext( readers.make_tf_record_dataset( file_pattern=file_pattern, num_epochs=num_epochs, batch_size=batch_size, parser_fn=fn, num_parallel_reads=num_parallel_reads, drop_final_batch=drop_final_batch, shuffle=False)) self._verify_records( outputs, batch_size, file_index, num_epochs=num_epochs, interleave_cycle_length=num_parallel_reads, drop_final_batch=drop_final_batch, use_parser_fn=parser_fn) with self.assertRaises(errors.OutOfRangeError): self.evaluate(outputs()) def testRead(self): for batch_size in [1, 2]: for num_epochs in [1, 3]: # Basic test: read from file 0. self._read_test(batch_size, num_epochs, 0) # Basic test: read from file 1. self._read_test(batch_size, num_epochs, 1) # Basic test: read from both files. self._read_test(batch_size, num_epochs) # Basic test: read from both files, with parallel reads. self._read_test(batch_size, num_epochs, num_parallel_reads=8) def testDropFinalBatch(self): for batch_size in [1, 2, 10]: for num_epochs in [1, 3]: # Read from file 0. self._read_test(batch_size, num_epochs, 0, drop_final_batch=True) # Read from both files. self._read_test(batch_size, num_epochs, drop_final_batch=True) # Read from both files, with parallel reads. 
self._read_test(batch_size, num_epochs, num_parallel_reads=8, drop_final_batch=True) def testParserFn(self): for batch_size in [1, 2]: for num_epochs in [1, 3]: for drop_final_batch in [False, True]: self._read_test(batch_size, num_epochs, parser_fn=True, drop_final_batch=drop_final_batch) self._read_test(batch_size, num_epochs, num_parallel_reads=8, parser_fn=True, drop_final_batch=drop_final_batch) def _shuffle_test(self, batch_size, num_epochs, num_parallel_reads=1, seed=None): def dataset_fn(): return readers.make_tf_record_dataset( file_pattern=self.test_filenames, num_epochs=num_epochs, batch_size=batch_size, num_parallel_reads=num_parallel_reads, shuffle=True, shuffle_seed=seed) next_element = self.getNext(dataset_fn()) first_batches = [] try: while True: first_batches.append(self.evaluate(next_element())) except errors.OutOfRangeError: pass next_element = self.getNext(dataset_fn()) second_batches = [] try: while True: second_batches.append(self.evaluate(next_element())) except errors.OutOfRangeError: pass self.assertEqual(len(first_batches), len(second_batches)) if seed is not None: # if you set a seed, should get the same results for i in range(len(first_batches)): self.assertAllEqual(first_batches[i], second_batches[i]) expected = [] for f in range(self._num_files): for r in range(self._num_records): expected.extend([self._record(f, r)] * num_epochs) for batches in (first_batches, second_batches): actual = [] for b in batches: actual.extend(b) self.assertAllEqual(sorted(expected), sorted(actual)) def testShuffle(self): for batch_size in [1, 2]: for num_epochs in [1, 3]: for num_parallel_reads in [1, 2]: # Test that all expected elements are produced self._shuffle_test(batch_size, num_epochs, num_parallel_reads) # Test that elements are produced in a consistent order if # you specify a seed. self._shuffle_test(batch_size, num_epochs, num_parallel_reads, seed=21345) def testIndefiniteRepeatShapeInference(self): dataset = readers.make_tf_record_dataset( file_pattern=self.test_filenames, num_epochs=None, batch_size=32) for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)): self.assertEqual(32, shape[0]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/make_tf_record_dataset_test.py
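A minimal sketch of make_tf_record_dataset, the convenience reader tested above (TF 1.15); the file pattern is hypothetical.

import tensorflow as tf

dataset = tf.data.experimental.make_tf_record_dataset(
    file_pattern="/tmp/data/*.tfrecord",
    batch_size=32,
    num_epochs=3,
    shuffle=True,
    shuffle_seed=21345,
    num_parallel_reads=8,
    drop_final_batch=False)
# Each element is a batch of serialized tf.Example strings; pass parser_fn=...
# to decode records as part of the same pipeline, as the parser_fn test does.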
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `ShuffleAndRepeatFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import tf2 from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase): def testShuffleAndRepeatFusion(self): if tf2.enabled() and context.executing_eagerly(): expected = "Shuffle" else: expected = "ShuffleAndRepeat" dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next([expected])).shuffle(10).repeat(2) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.shuffle_and_repeat_fusion = True dataset = dataset.with_options(options) get_next = self.getNext(dataset) for _ in range(2): results = [] for _ in range(10): results.append(self.evaluate(get_next())) self.assertAllEqual([x for x in range(10)], sorted(results)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py
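A minimal sketch of enabling the ShuffleAndRepeatFusion rewrite through tf.data.Options on TF 1.15; the fusion itself happens inside the tf.data graph optimizer.

import tensorflow as tf

dataset = tf.data.Dataset.range(10).shuffle(10).repeat(2)

options = tf.data.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.shuffle_and_repeat_fusion = True
dataset = dataset.with_options(options)
# The pipeline still yields two shuffled passes over 0..9; only the internal
# dataset graph changes (Shuffle and Repeat become a single fused op).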
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `FilterFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test def _filter_fusion_test_cases(): """Generates test cases for the FilterFusion optimization.""" take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) greater = lambda x: math_ops.greater(x + 5, 0) tests = [] filters = [take_all, is_zero, greater] identity = lambda x: x for x, predicate_1 in enumerate(filters): for y, predicate_2 in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), identity, [predicate_1, predicate_2])) for z, predicate_3 in enumerate(filters): tests.append(("Mixed{}{}{}".format(x, y, z), identity, [predicate_1, predicate_2, predicate_3])) take_all_multiple = lambda x, y: constant_op.constant(True) # Multi output tests.append(("Multi1", lambda x: (x, x), [take_all_multiple, take_all_multiple])) tests.append(("Multi2", lambda x: (x, 2), [ take_all_multiple, lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0) ])) return tuple(tests) @test_util.run_all_in_graph_and_eager_modes class FilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters(*_filter_fusion_test_cases()) def testFilterFusion(self, map_function, predicates): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "Filter", "MemoryCacheImpl"])).map(map_function) for predicate in predicates: dataset = dataset.filter(predicate) dataset = dataset.cache() options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.filter_fusion = True dataset = dataset.with_options(options) expected_output = [] for x in range(5): r = map_function(x) filtered = False for predicate in predicates: if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if not self.evaluate(b): filtered = True break if not filtered: expected_output.append(r) self.assertDatasetProduces(dataset, expected_output=expected_output) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/filter_fusion_test.py
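A minimal sketch of the FilterFusion option tested above (TF 1.15): chained filter() calls over stateless predicates can be fused into a single dataset op.

import tensorflow as tf

dataset = tf.data.Dataset.range(100)
dataset = dataset.filter(lambda x: tf.equal(x % 2, 0))
dataset = dataset.filter(lambda x: x > 10)

options = tf.data.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)  # same elements, fewer dataset ops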
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `_ModelDataset` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ModelDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): def testAutotuneOption(self): dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset.map(lambda x: x).apply( optimization.assert_next(["Model"])) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.autotune = True dataset = dataset.with_options(options) get_next = self.getNext(dataset) self.assertEqual(0, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_test.py
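A minimal sketch of the autotune option whose private _ModelDataset wrapper is tested above (TF 1.15); the map function is illustrative.

import tensorflow as tf

dataset = (tf.data.Dataset.range(1000)
           .map(lambda x: x * x, num_parallel_calls=tf.data.experimental.AUTOTUNE)
           .prefetch(tf.data.experimental.AUTOTUNE))

options = tf.data.Options()
options.experimental_optimization.autotune = True  # on by default; shown for clarity
dataset = dataset.with_options(options)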
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental.assert_next()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class AssertNextDatasetTest(test_base.DatasetTestBase): def testAssertNext(self): dataset = dataset_ops.Dataset.from_tensors(0).apply( optimization.assert_next(["Map"])).map(lambda x: x) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=[0]) def testAssertNextInvalid(self): dataset = dataset_ops.Dataset.from_tensors(0).apply( optimization.assert_next(["Whoops"])).map(lambda x: x) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_error=( errors.InvalidArgumentError, "Asserted Whoops transformation at offset 0 but encountered " "Map transformation instead.")) def testAssertNextShort(self): dataset = dataset_ops.Dataset.from_tensors(0).apply( optimization.assert_next(["Map", "Whoops"])).map(lambda x: x) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.autotune = False dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_error=( errors.InvalidArgumentError, "Asserted next 2 transformations but encountered only 1.")) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/assert_next_dataset_test.py
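A minimal sketch of assert_next, the debugging transformation tested above; it is imported from the internal optimization module, exactly as the test does (TF 1.15).

from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops

dataset = dataset_ops.Dataset.from_tensors(0).apply(
    optimization.assert_next(["Map"])).map(lambda x: x)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
# Iteration succeeds only if the next transformation in the optimized dataset
# graph really is a Map; otherwise an InvalidArgumentError is raised.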
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental._ChooseFastestBranchDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ChooseFastestBranchDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): def testSimple(self): dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4]) def branch(dataset): return dataset.map(lambda x: x) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch, branch]) self.assertDatasetProduces( choose_fastest, expected_output=[0, 1, 2, 3, 4], expected_shapes=dataset_ops.get_legacy_output_shapes(dataset)) def testCaptureSimple(self): dataset = dataset_ops.Dataset.range(10) const_64 = constant_op.constant(1, dtypes.int64) const_32 = constant_op.constant(1, dtypes.int32) def branch_0(dataset): return dataset.map(lambda x: x + const_64) def branch_1(dataset): return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64)) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch_0, branch_1]) self.assertDatasetProduces( choose_fastest, expected_output=list(range(1, 11))) def testDifferentFunctions(self): dataset = dataset_ops.Dataset.range(100) def branch_0(dataset): return dataset.map(lambda x: x).batch(10) def branch_1(dataset): return dataset.batch(10).map(lambda x: x) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch_0, branch_1], ratio_numerator=10) self.assertDatasetProduces( choose_fastest, expected_output=[list(range(10 * x, 10 * x + 10)) for x in range(10)]) def testWithRepeatBeforeAndAfter(self): dataset = dataset_ops.Dataset.from_tensors(0).repeat(10) def branch_0(dataset): return dataset.map(lambda x: x).batch(10) def branch_1(dataset): return dataset.batch(10).map(lambda x: x) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch_0, branch_1], ratio_numerator=10) choose_fastest = choose_fastest.repeat(10) self.assertDatasetProduces( choose_fastest, expected_output=[[0] * 10 for _ in range(10)]) def testWithPrefetch(self): """Should maintain ordering even if the branches do prefetching.""" dataset = dataset_ops.Dataset.range(100) def branch_0(dataset): return dataset.prefetch(1) def 
branch_1(dataset): return dataset.prefetch(2) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch_0, branch_1]) self.assertDatasetProduces(choose_fastest, expected_output=list(range(100))) def testWithMoreOutputThanInput(self): dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100) def branch(dataset): return dataset.apply(batching.unbatch()) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch, branch], ratio_denominator=100, num_elements_per_branch=100) self.assertDatasetProduces(choose_fastest, expected_output=[0] * 1000) def testWithBadNumElements(self): dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100) def branch(dataset): return dataset.apply(batching.unbatch()) def make_dataset(): return optimization._ChooseFastestBranchDataset( dataset, [branch, branch], ratio_denominator=100, num_elements_per_branch=10) expected_error_msg = ("`num_elements_per_branch` must be divisible by " "`ratio_denominator`") if context.executing_eagerly(): with self.assertRaisesRegexp(errors.InvalidArgumentError, expected_error_msg): make_dataset() else: choose_fastest = make_dataset() self.assertDatasetProduces( choose_fastest, expected_error=(errors.InvalidArgumentError, expected_error_msg)) def testErrorWithRepeat(self): dataset = dataset_ops.Dataset.from_tensors(0) def branch(dataset): return dataset.repeat(10) choose_fastest = optimization._ChooseFastestBranchDataset( dataset, [branch, branch], ratio_denominator=10, num_elements_per_branch=10) self.assertDatasetProduces( choose_fastest, expected_error=( errors.InvalidArgumentError, "Cannot create more than one WrapperIterator per WrapperDataset."), expected_error_iter=2) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py
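A minimal sketch of the private _ChooseFastestBranchDataset tested above (TF 1.15): two functionally equivalent branches are raced and the faster one is used for the remainder of iteration. The branch functions here are illustrative.

from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops

dataset = dataset_ops.Dataset.range(100)

def map_then_batch(ds):
    return ds.map(lambda x: x + 1).batch(10)

def batch_then_map(ds):
    return ds.batch(10).map(lambda x: x + 1)

# ratio_numerator=10 declares that each output element consumes 10 input elements.
choose_fastest = optimization._ChooseFastestBranchDataset(
    dataset, [map_then_batch, batch_then_map], ratio_numerator=10)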
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `MapParallelization` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test def _map_parallelization_test_cases(): """Generates test cases for the MapParallelization optimization.""" identity = lambda x: x increment = lambda x: x + 1 def assert_greater(x): assert_op = control_flow_ops.Assert(math_ops.greater(x, -1), [x]) with ops.control_dependencies([assert_op]): return x return (("Identity", identity, True), ("Increment", increment, True), ("AssertGreater", assert_greater, True)) @test_util.run_all_in_graph_and_eager_modes class MapParallelizationTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters(*_map_parallelization_test_cases()) def testMapParallelization(self, function, should_be_parallel): next_nodes = ["ParallelMap"] if should_be_parallel else ["Map"] dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(next_nodes)).map(function) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_parallelization = True dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_output=[function(x) for x in range(5)]) def testMapParallelizationWithCapturedConstant(self): """Tests that functions with captured constants are parallelized.""" captured_t = constant_op.constant(42, dtype=dtypes.int64) def fn(x): return x + captured_t dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["ParallelMap"])).map(fn) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_parallelization = True dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_output=[x + 42 for x in range(5)]) def testMapParallelizationWithCapturedVariable(self): """Tests that functions with captured variables are not parallelized.""" captured_t = variables.Variable(42, dtype=dtypes.int64) def fn(x): return x + captured_t dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map"])).map(fn) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = 
False options.experimental_optimization.map_parallelization = True dataset = dataset.with_options(options) self.evaluate(variables.global_variables_initializer()) self.assertDatasetProduces( dataset, expected_output=[x + 42 for x in range(5)], requires_initialization=True) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py
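A minimal sketch of the pattern the test above exercises: disable the default rewrites, enable map_parallelization, and map a stateless function. It reuses only the dataset_ops module and option names already imported by the test; the variable names are illustrative.

from tensorflow.python.data.ops import dataset_ops

# A stateless map function is a candidate for the MapParallelization rewrite,
# which replaces the Map node with ParallelMap.
dataset = dataset_ops.Dataset.range(5).map(lambda x: x + 1)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
# The elements produced (1..5) are unchanged; only how the map executes changes.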
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the private `_OptimizeDataset` transformation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import warnings from absl.testing import parameterized import numpy as np from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.experimental.ops import scan_ops from tensorflow.python.data.experimental.ops import threadpool from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test def _generate_captured_refvar_test_cases(): """Generates testcases. Returns: A list of tuples of (testcase_name, make_dataset_fn). make_dataset_fn takes a tf.Variable as input and creates a test dataset that uses that variable. 
""" def make_map_dataset(var): return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var) def make_flat_map_dataset(var): return dataset_ops.Dataset.from_tensors( 0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var)) def make_filter_dataset(var): return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var) def make_map_and_batch_dataset(var): def map_fn(x): return x + var return dataset_ops.Dataset.from_tensors(0).apply( batching.map_and_batch(map_fn, 1)) def make_group_by_reducer_dataset(var): reducer = grouping.Reducer( init_func=lambda _: 0, reduce_func=lambda x, y: x, finalize_func=lambda _: var) return dataset_ops.Dataset.range(5).apply( grouping.group_by_reducer(lambda x: x % 2, reducer)) def make_group_by_window_dataset(var): def reduce_fn(key, bucket): del key, bucket return dataset_ops.Dataset.from_tensors(var) return dataset_ops.Dataset.from_tensors(0).repeat(10).apply( grouping.group_by_window(lambda _: 0, reduce_fn, 10)) def make_scan_dataset(var): return dataset_ops.Dataset.from_tensors(0).apply( scan_ops.scan( 0, lambda old_state, elem: (old_state + 1, elem + old_state + var))) return [ # Core datasets ("Map", make_map_dataset), ("FlatMap", make_flat_map_dataset), ("Filter", make_filter_dataset), # Experimental datasets ("MapAndBatch", make_map_and_batch_dataset), ("GroupByReducer", make_group_by_reducer_dataset), ("GroupByWindow", make_group_by_window_dataset), ("Scan", make_scan_dataset) ] @test_util.run_all_in_graph_and_eager_modes class OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): def testOptimizationStatefulFunction(self): dataset = dataset_ops.Dataset.range( 10).map(lambda _: random_ops.random_uniform([])).batch(10) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) get_next = self.getNext(dataset) self.evaluate(get_next()) @test_util.run_v1_only("b/123902160") def testSkipEagerOptimizationLargeInputFromTensor(self): input_t = array_ops.placeholder(dtypes.int32, (None, None, None)) dataset = dataset_ops.Dataset.from_tensors(input_t) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) iterator = dataset_ops.make_initializable_iterator(dataset) init_op = iterator.initializer get_next = iterator.get_next() with self.cached_session() as sess: sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)}) self.evaluate(get_next) @test_util.run_v1_only("b/123902160") def testSkipEagerOptimizationLargeInputFromTensorSlices(self): input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None)) dataset = dataset_ops.Dataset.from_tensor_slices(input_t) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) iterator = dataset_ops.make_initializable_iterator(dataset) init_op = iterator.initializer get_next = iterator.get_next() with self.cached_session() as sess: sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)}) self.evaluate(get_next) def testOptimizationNestedDataset(self): def flat_map_fn(_): dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"])) dataset = dataset.skip(0) # Should be removed by noop elimination dataset = dataset.cache() return dataset dataset = dataset_ops.Dataset.range(1) dataset = dataset.flat_map(flat_map_fn) options = dataset_ops.Options() 
options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=[0]) def testOptimizationNestedDatasetWithModifiedRetval(self): def flat_map_fn(_): dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset.apply(optimization.assert_next(["MapAndBatch"])) # Should be fused by map and batch fusion dataset = dataset.map(lambda x: x) dataset = dataset.batch(1) return dataset dataset = dataset_ops.Dataset.range(1) dataset = dataset.flat_map(flat_map_fn) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_and_batch_fusion = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=[[0]]) def testOptimizationThreadPoolDataset(self): dataset = dataset_ops.Dataset.range(10).batch(10) dataset = threadpool.override_threadpool( dataset, threadpool.PrivateThreadPool( 2, display_name="private_thread_pool_%d" % 2)) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_output=[list(range(10))], requires_initialization=True) def testOptimizationNonSerializable(self): dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset.apply(optimization.assert_next(["FiniteSkip"])) dataset = dataset.skip(0) # Should not be removed by noop elimination dataset = dataset.apply(optimization.non_serializable()) dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"])) dataset = dataset.skip(0) # Should be removed by noop elimination dataset = dataset.cache() options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=[0]) def testOptimizationNonSerializableAsDirectInput(self): """Tests that non-serializable dataset can be OptimizeDataset's input.""" dataset = dataset_ops.Dataset.from_tensors(0) dataset = dataset.apply(optimization.non_serializable()) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=[0]) @parameterized.named_parameters(_generate_captured_refvar_test_cases()) @test_util.run_v1_only("RefVariables are not supported in eager mode.") def testSkipEagerOptimizationWithCapturedRefVar(self, dataset_fn): """Tests that default optimizations are disabled with ref variables.""" variable = variable_scope.get_variable( "v", initializer=0, use_resource=False) assign_op = variable.assign_add(1) # Check that warning is logged. 
warnings.simplefilter("always") with warnings.catch_warnings(record=True) as w: unoptimized_dataset = dataset_fn(variable) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.noop_elimination = True options.experimental_optimization.map_and_batch_fusion = True optimized_dataset = unoptimized_dataset.with_options(options) optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset) self.assertGreaterEqual(len(w), 1) expected = ("tf.data static optimizations are not compatible with " "tf.Variable. The following optimizations will be disabled: %s." " To enable optimizations, use resource variables instead by " "calling `tf.enable_resource_variables()` at the start of the " "program." % (", ".join(options._static_optimizations()))) self.assertTrue(any([expected in str(warning) for warning in w])) # Check that outputs are the same in the optimized and unoptimized cases, # when the variable value is changing. unoptimized_it = dataset_ops.make_initializable_iterator( unoptimized_dataset) with ops.control_dependencies([assign_op]): unoptimized_output = unoptimized_it.get_next() optimized_output = optimized_it.get_next() self.evaluate(variable.initializer) self.evaluate((unoptimized_it.initializer, optimized_it.initializer)) while True: try: unoptimized, optimized = self.evaluate((unoptimized_output, optimized_output)) self.assertEqual(unoptimized, optimized) except errors.OutOfRangeError: break def testOptimizationEnabledByDefault(self): """Tests that some optimizations are applied to datasets by default.""" options = dataset_ops.Options() expected_optimizations = [ "map_and_batch_fusion", "noop_elimination", "shuffle_and_repeat_fusion", ] self.assertEqual( set(options._static_optimizations()), set(expected_optimizations)) def testOptimizationDisableDefault(self): """Tests that we can disable all static optimizations enabled by default. If the `apply_default_optimizations` optimization options flag is False, only explicitly enabled optimizations will be applied. """ options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.hoist_random_uniform = True options.experimental_optimization.noop_elimination = True expected_optimizations = [ "hoist_random_uniform", "noop_elimination", ] self.assertEqual( set(options._static_optimizations()), set(expected_optimizations)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_test.py
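As a quick reference for the defaults asserted above: map_and_batch_fusion, noop_elimination and shuffle_and_repeat_fusion are applied unless apply_default_optimizations is set to False, after which only explicitly enabled rewrites run. A small sketch using the option names from the test (not part of the test file):

from tensorflow.python.data.ops import dataset_ops

# With default options, map_and_batch_fusion, noop_elimination and
# shuffle_and_repeat_fusion are applied automatically.
default_options = dataset_ops.Options()

# Opting out of the defaults and enabling rewrites one by one.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True

dataset = dataset_ops.Dataset.range(10).skip(0)  # skip(0) can be eliminated
dataset = dataset.with_options(options)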
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `MapFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test def _map_fusion_test_cases(): """Generates test cases for the MapFusion optimization.""" identity = lambda x: x increment = lambda x: x + 1 def increment_and_square(x): y = x + 1 return y * y functions = [identity, increment, increment_and_square] tests = [] for i, fun1 in enumerate(functions): for j, fun2 in enumerate(functions): tests.append(( "Test{}{}".format(i, j), [fun1, fun2], )) for k, fun3 in enumerate(functions): tests.append(( "Test{}{}{}".format(i, j, k), [fun1, fun2, fun3], )) swap = lambda x, n: (n, x) tests.append(( "Swap1", [lambda x: (x, 42), swap], )) tests.append(( "Swap2", [lambda x: (x, 42), swap, swap], )) return tuple(tests) @test_util.run_all_in_graph_and_eager_modes class MapFusionTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters(*_map_fusion_test_cases()) def testMapFusion(self, functions): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Map", "MemoryCacheImpl"])) for function in functions: dataset = dataset.map(function) dataset = dataset.cache() options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_fusion = True dataset = dataset.with_options(options) expected_output = [] for x in range(5): r = x for function in functions: if isinstance(r, tuple): r = function(*r) # Pass tuple as multiple arguments. else: r = function(r) expected_output.append(r) self.assertDatasetProduces(dataset, expected_output=expected_output) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/map_fusion_test.py
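The fusion under test reduces to two chained stateless maps; a sketch with illustrative lambdas, built from the same modules the test imports:

from tensorflow.python.data.ops import dataset_ops

# Two consecutive stateless maps can be fused into a single Map node.
dataset = dataset_ops.Dataset.range(5).map(lambda x: x + 1).map(lambda x: x * x)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
# Element values are unchanged: (x + 1) ** 2 for x in range(5).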
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `AutotuneBuffers` rewrite.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class InjectPrefetchTest(test_base.DatasetTestBase): def _enable_autotune_buffers(self, dataset): options = dataset_ops.Options() options.experimental_optimization.autotune_buffers = True return dataset.with_options(options) def testParallelMap(self): dataset = dataset_ops.Dataset.range(100) dataset = dataset.apply( optimization.assert_next(["ParallelMap", "Prefetch", "FiniteTake"])) dataset = dataset.map( lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.take(50) dataset = self._enable_autotune_buffers(dataset) self.assertDatasetProduces(dataset, range(1, 51)) def testMapAndBatch(self): dataset = dataset_ops.Dataset.range(100) dataset = dataset.apply( optimization.assert_next(["MapAndBatch", "Prefetch", "FiniteTake"])) dataset = dataset.map( lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.batch(10) dataset = dataset.take(5) dataset = self._enable_autotune_buffers(dataset) self.assertDatasetProduces( dataset, [list(range(i + 1, i + 11)) for i in range(0, 50, 10)]) def testParallelInterleaveV2(self): dataset = dataset_ops.Dataset.range(100) dataset = dataset.apply( optimization.assert_next( ["ParallelInterleaveV2", "Prefetch", "FiniteTake"])) dataset = dataset.interleave( lambda x: dataset_ops.Dataset.from_tensors(x + 1), num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.take(50) dataset = self._enable_autotune_buffers(dataset) self.assertDatasetProduces(dataset, range(1, 51)) def testChainedParallelDatasets(self): dataset = dataset_ops.Dataset.range(100) dataset = dataset.apply( optimization.assert_next([ "ParallelMap", "Prefetch", "ParallelInterleaveV2", "Prefetch", "MapAndBatch", "Prefetch", "FiniteTake" ])) dataset = dataset.map( lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.interleave( lambda x: dataset_ops.Dataset.from_tensors(x + 1), num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.map( lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset.batch(1) dataset = dataset.take(50) dataset = self._enable_autotune_buffers(dataset) self.assertDatasetProduces(dataset, [[i] for i in range(3, 53)]) def testNoRegularMap(self): dataset = dataset_ops.Dataset.range(100) dataset = dataset.apply(optimization.assert_next(["Map", "FiniteTake"])) dataset = dataset.map(lambda x: x + 1).take(50) dataset = self._enable_autotune_buffers(dataset) 
self.assertDatasetProduces(dataset, range(1, 51)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/inject_prefetch_test.py
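The rewrite checked above injects an autotuned Prefetch after parallel transformations; a sketch of enabling it on a ParallelMap pipeline (sizes and names are illustrative):

from tensorflow.python.data.ops import dataset_ops

# autotune_buffers injects an autotuned Prefetch after parallel
# transformations such as ParallelMap, ParallelInterleaveV2 and MapAndBatch.
dataset = dataset_ops.Dataset.range(100).map(
    lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE).take(50)

options = dataset_ops.Options()
options.experimental_optimization.autotune_buffers = True
dataset = dataset.with_options(options)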
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `LatencyAllEdges` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.experimental.ops import stats_aggregator from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase): def testLatencyStatsOptimization(self): aggregator = stats_aggregator.StatsAggregator() dataset = dataset_ops.Dataset.from_tensors(1).apply( optimization.assert_next( ["LatencyStats", "Map", "LatencyStats", "Prefetch", "LatencyStats"])).map(lambda x: x * x).prefetch(1) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_stats.latency_all_edges = True options.experimental_stats.aggregator = aggregator dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_output=[1], requires_initialization=True, num_test_iterations=1) handle = self.getHandle(aggregator) self.assertStatisticsHasCount( handle, self.regexForNodeName("record_latency::TensorDataset"), 1) self.assertStatisticsHasCount( handle, self.regexForNodeName("record_latency::MapDataset"), 1) self.assertStatisticsHasCount( handle, self.regexForNodeName("record_latency::PrefetchDataset"), 1) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
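A sketch of wiring the same statistics collection outside the test harness, using only the stats_aggregator and dataset_ops modules imported above (pipeline shape mirrors the test):

from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.ops import dataset_ops

# Attach a StatsAggregator and let LatencyAllEdges wrap each dataset node
# with a LatencyStats recorder.
aggregator = stats_aggregator.StatsAggregator()

dataset = dataset_ops.Dataset.from_tensors(1).map(lambda x: x * x).prefetch(1)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_stats.latency_all_edges = True
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)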
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `HoistRandomUniform` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.platform import test def _hoist_random_uniform_test_cases(): """Generates test cases for the HoistRandomUniform optimization.""" plus_one = lambda x: x + 1 def random(_): return random_ops.random_uniform([], minval=1, maxval=10, dtype=dtypes.float32, seed=42) def random_with_assert(x): y = random(x) assert_op = control_flow_ops.Assert(math_ops.greater_equal(y, 1), [y]) with ops.control_dependencies([assert_op]): return y twice_random = lambda x: (random(x) + random(x)) / 2. tests = [("PlusOne", plus_one, False), ("RandomUniform", random, True), ("RandomWithAssert", random_with_assert, True), ("TwiceRandom", twice_random, False)] return tuple(tests) @test_util.run_all_in_graph_and_eager_modes class HoistRandomUniformTest(test_base.DatasetTestBase, parameterized.TestCase): def _testDataset(self, dataset): previous_result = 0 get_next = self.getNext(dataset) for _ in range(5): result = self.evaluate(get_next()) self.assertLessEqual(1, result) self.assertLessEqual(result, 10) # This checks if the result is somehow random by checking if we are not # generating the same values. 
self.assertNotEqual(previous_result, result) previous_result = result with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @parameterized.named_parameters(*_hoist_random_uniform_test_cases()) def testHoisting(self, function, will_optimize): dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next( ["Zip[0]", "Map"] if will_optimize else ["Map"])).map(function) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.hoist_random_uniform = True dataset = dataset.with_options(options) self._testDataset(dataset) def testCapturedInputs(self): a = constant_op.constant(1, dtype=dtypes.float32) b = constant_op.constant(0, dtype=dtypes.float32) some_tensor = math_ops.mul(a, b) def random_with_capture(_): return some_tensor + random_ops.random_uniform( [], minval=1, maxval=10, dtype=dtypes.float32, seed=42) dataset = dataset_ops.Dataset.range(5).apply( optimization.assert_next(["Zip[0]", "Map"])).map(random_with_capture) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.hoist_random_uniform = True dataset = dataset.with_options(options) self._testDataset(dataset) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py
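A sketch of the case the rewrite targets: a map whose only stateful op is a single random_uniform, which the optimizer can hoist into a zipped random dataset. The function mirrors the test's own random helper; everything else is illustrative.

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops


def random_fn(_):
  # The only stateful op is a single RandomUniform, which the rewrite can
  # hoist into a separate random dataset zipped with the input.
  return random_ops.random_uniform(
      [], minval=1, maxval=10, dtype=dtypes.float32, seed=42)


dataset = dataset_ops.Dataset.range(5).map(random_fn)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.hoist_random_uniform = True
dataset = dataset.with_options(options)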
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `FilterWithRandomUniformFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.ops import random_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class FilterWithRandomUniformFusionTest(test_base.DatasetTestBase): def testFilterWithRandomUniformFusion(self): dataset = dataset_ops.Dataset.range(10000000).apply( optimization.assert_next(["Sampling"])) dataset = dataset.filter(lambda _: random_ops.random_uniform([]) < 0.05) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.filter_with_random_uniform_fusion = True dataset = dataset.with_options(options) get_next = self.getNext(dataset) self.evaluate(get_next()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/filter_with_random_uniform_fusion_test.py
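A sketch of the fusable pattern: a filter whose predicate draws a fresh uniform sample and compares it against a constant threshold (the range size and threshold mirror the test):

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import random_ops

# A predicate that draws a fresh uniform sample and compares it against a
# constant threshold can be fused into a single Sampling node.
dataset = dataset_ops.Dataset.range(10000000).filter(
    lambda _: random_ops.random_uniform([]) < 0.05)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_with_random_uniform_fusion = True
dataset = dataset.with_options(options)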
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.experimental._ChooseFastestDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class ChooseFastestDatasetTest(test_base.DatasetTestBase, parameterized.TestCase): def testChooseFastestSimple(self): dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4]) merge = optimization._ChooseFastestDataset([dataset, dataset]) self.assertDatasetProduces( merge, expected_output=[0, 1, 2, 3, 4], expected_shapes=dataset_ops.get_legacy_output_shapes(dataset)) def testChooseFastestManyInputs(self): dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4]) merge = optimization._ChooseFastestDataset([dataset for _ in range(5)]) self.assertDatasetProduces( merge, expected_output=[0, 1, 2, 3, 4], expected_shapes=dataset_ops.get_legacy_output_shapes(dataset)) def testChooseFastest(self): dataset = dataset_ops.Dataset.range(600) f = lambda x: 2 * x dataset_a = dataset.batch(50).map(f) dataset_b = dataset.map(f).batch(50) merge = optimization._ChooseFastestDataset([dataset_a, dataset_b]) self.assertDatasetProduces( merge, expected_output=[ [i * 2 for i in range(j * 50, (j + 1) * 50)] for j in range(12) ], expected_shapes=dataset_ops.get_legacy_output_shapes(dataset_a)) @parameterized.named_parameters( ("Shapes", [0], [[1, 2, 3]], "must have compatible output shapes."), ("Types", [0], [0.0], "must have the same output types."), ("NumComponents", [0], ([0], [1]), "must have the same output types."), ("Cardinality", [1, 2, 3], [1], "must have compatible cardinalities.")) def testChooseFastestErrorWithIncompatibleInput(self, slices_a, slices_b, error_msg): dataset_a = dataset_ops.Dataset.from_tensor_slices(slices_a) dataset_b = dataset_ops.Dataset.from_tensor_slices(slices_b) # The error is raised at dataset creation time. if context.executing_eagerly(): with self.assertRaises(errors.InvalidArgumentError): merge = optimization._ChooseFastestDataset([dataset_a, dataset_b]) else: merge = optimization._ChooseFastestDataset([dataset_a, dataset_b]) self.assertDatasetProduces( merge, expected_error=(errors.InvalidArgumentError, error_msg)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_dataset_test.py
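For orientation, the private transformation under test can be invoked directly as below; this is a sketch of internal API usage taken from the test itself, not a public entry point.

from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops

# The input datasets must produce the same elements; after a benchmarking
# phase the fastest branch keeps serving the remaining elements.
dataset = dataset_ops.Dataset.range(600)
dataset_a = dataset.batch(50).map(lambda x: 2 * x)
dataset_b = dataset.map(lambda x: 2 * x).batch(50)
merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])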
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `MapAndBatchFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MapAndBatchFusionTest(test_base.DatasetTestBase): def testMapAndBatchFusion(self): dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next( ["MapAndBatch"])).map(lambda x: x * x).batch(10) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_and_batch_fusion = True dataset = dataset.with_options(options) self.assertDatasetProduces( dataset, expected_output=[[x * x for x in range(10)]]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/map_and_batch_fusion_test.py
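The minimal fusable pattern is simply map followed by batch; a sketch using the same option names as the test (map function is illustrative):

from tensorflow.python.data.ops import dataset_ops

# Adjacent map(...) and batch(...) calls are rewritten into one MapAndBatch
# node when the fusion is enabled.
dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(10)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)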
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `NoopElimination` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class NoopEliminationTest(test_base.DatasetTestBase): def testNoopElimination(self): a = constant_op.constant(1, dtype=dtypes.int64) b = constant_op.constant(2, dtype=dtypes.int64) some_tensor = math_ops.mul(a, b) dataset = dataset_ops.Dataset.range(5) dataset = dataset.apply( optimization.assert_next( ["FiniteRepeat", "FiniteSkip", "Prefetch", "MemoryCacheImpl"])) dataset = dataset.repeat(some_tensor).skip(5).take(-1).skip(0).repeat( 1).prefetch(0).prefetch(1).cache() options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) self.assertDatasetProduces(dataset, expected_output=range(5)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py
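A sketch of the no-op transformations the rewrite removes, mirroring the chain asserted above:

from tensorflow.python.data.ops import dataset_ops

# take(-1), skip(0), repeat(1) and prefetch(0) cannot change the stream and
# are removed by the NoopElimination rewrite.
dataset = dataset_ops.Dataset.range(5).take(-1).skip(0).repeat(1).prefetch(0)

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)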
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `MapAndFilterFusion` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import test_util from tensorflow.python.ops import math_ops from tensorflow.python.platform import test def _map_and_filter_fusion_test_cases(): """Generates test cases for the MapAndFilterFusion optimization.""" identity = lambda x: x increment = lambda x: x + 1 minus_five = lambda x: x - 5 def increment_and_square(x): y = x + 1 return y * y take_all = lambda x: constant_op.constant(True) is_zero = lambda x: math_ops.equal(x, 0) is_odd = lambda x: math_ops.equal(x % 2, 0) greater = lambda x: math_ops.greater(x + 5, 0) functions = [identity, increment, minus_five, increment_and_square] filters = [take_all, is_zero, is_odd, greater] tests = [] for x, fun in enumerate(functions): for y, predicate in enumerate(filters): tests.append(("Mixed{}{}".format(x, y), fun, predicate)) # Multi output tests.append(("Multi1", lambda x: (x, x), lambda x, y: constant_op.constant(True))) tests.append( ("Multi2", lambda x: (x, 2), lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0))) return tuple(tests) @test_util.run_all_in_graph_and_eager_modes class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase): def _testMapAndFilter(self, dataset, function, predicate): expected_output = [] for x in range(10): r = function(x) if isinstance(r, tuple): b = predicate(*r) # Pass tuple as multiple arguments. else: b = predicate(r) if self.evaluate(b): expected_output.append(r) self.assertDatasetProduces(dataset, expected_output=expected_output) @parameterized.named_parameters(*_map_and_filter_fusion_test_cases()) def testMapFilterFusion(self, function, predicate): dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next(["Map", "Filter", "Map"])).map(function).filter(predicate) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_and_filter_fusion = True dataset = dataset.with_options(options) self._testMapAndFilter(dataset, function, predicate) def testCapturedInputs(self): a = constant_op.constant(3, dtype=dtypes.int64) b = constant_op.constant(4, dtype=dtypes.int64) some_tensor = math_ops.mul(a, b) function = lambda x: x * x def predicate(y): return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor) # We are currently not supporting functions with captured inputs. 
dataset = dataset_ops.Dataset.range(10).apply( optimization.assert_next(["Map", "Filter"])).map(function).filter(predicate) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_and_filter_fusion = True dataset = dataset.with_options(options) self._testMapAndFilter(dataset, function, predicate) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
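A sketch of the map-then-filter pattern the rewrite targets; the map function and predicate here are illustrative stand-ins for the parameterized cases above.

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops

# map(...).filter(...) is rewritten so the predicate is computed alongside
# the mapped value, leaving a cheap filter on the precomputed boolean.
dataset = dataset_ops.Dataset.range(10).map(lambda x: x + 1).filter(
    lambda y: math_ops.equal(y % 2, 0))

options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)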
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the `MapVectorization` optimization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import bitwise_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.platform import test def _generate_unary_cwise_math_cases(): # TODO(rachelim): Consolidate tests with pfor when APIs are somewhat shared. 
bitwise_cases = [("Invert", bitwise_ops.invert)] logical_cases = [("LogicalNot", math_ops.logical_not)] complex_cases = [ ("Angle", math_ops.angle), ("ComplexAbs", math_ops.abs), ("Conj", math_ops.conj), ("Imag", math_ops.imag), ("Real", math_ops.real), ] real_cases = [ ("Abs", math_ops.abs), ("Acos", math_ops.acos), ("Acosh", lambda x: math_ops.acosh(1 + math_ops.square(x))), ("Asin", math_ops.asin), ("Asinh", math_ops.asinh), ("Atan", math_ops.atan), ("Atanh", math_ops.atanh), ("BesselI0e", math_ops.bessel_i0e), ("BesselI1e", math_ops.bessel_i1e), ("Ceil", math_ops.ceil), ("Cos", math_ops.cos), ("Cosh", math_ops.cosh), ("Digamma", math_ops.digamma), ("Elu", nn.elu), ("Erf", math_ops.erf), ("Erfc", math_ops.erfc), ("Exp", math_ops.exp), ("Expm1", math_ops.expm1), ("Floor", math_ops.floor), ("Inv", math_ops.inv), ("IsFinite", math_ops.is_finite), ("IsInf", math_ops.is_inf), ("Lgamma", math_ops.lgamma), ("Log", math_ops.log), ("Log1p", math_ops.log1p), ("Neg", math_ops.negative), ("Reciprocal", math_ops.reciprocal), ("Relu", nn.relu), ("Relu6", nn.relu6), ("Rint", math_ops.rint), ("Round", math_ops.round), ("Rsqrt", math_ops.rsqrt), ("Selu", nn.selu), ("Sigmoid", math_ops.sigmoid), ("Sign", math_ops.sign), ("Sin", math_ops.sin), ("Sinh", math_ops.sinh), ("Softplus", nn.softplus), ("Softsign", nn.softsign), ("Sqrt", math_ops.sqrt), ("Square", math_ops.square), ("Tan", math_ops.tan), ("Tanh", math_ops.tanh), ] random_input = np.random.rand(3, 5) complex_component = np.random.rand(3, 5) random_int = np.random.randint(0, 10, (7, 3, 5)) def bitwise_dataset_factory(): return dataset_ops.Dataset.from_tensor_slices(random_int) def logical_dataset_factory(): return dataset_ops.Dataset.from_tensor_slices(random_input > 0) def random_dataset_factory(): return dataset_ops.Dataset.from_tensor_slices(random_input) def complex_dataset_factory(): return dataset_ops.Dataset.from_tensor_slices( math_ops.complex(random_input, complex_component)) case_factory_pairs = [ (bitwise_cases, bitwise_dataset_factory), (logical_cases, logical_dataset_factory), (complex_cases, complex_dataset_factory), (real_cases, random_dataset_factory), ] return [(case[0], case[1], factory) for cases, factory in case_factory_pairs for case in cases] def _generate_binary_cwise_math_cases(): bitwise_cases = [("BitwiseAnd", bitwise_ops.bitwise_and), ("BitwiseOr", bitwise_ops.bitwise_or), ("BitwiseXor", bitwise_ops.bitwise_xor), ("LeftShift", bitwise_ops.left_shift), ("RightShift", bitwise_ops.right_shift)] logical_cases = [("LogicalAnd", math_ops.logical_and), ("LogicalOr", math_ops.logical_or)] # Wrapper functions restricting the range of inputs of zeta and polygamma. 
def safe_polygamma(x, y): return math_ops.polygamma( math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1) def safe_zeta(x, y): return math_ops.zeta(x * x + 1, y * y) real_cases = [ ("Add", math_ops.add), ("AddV2", math_ops.add_v2), ("Atan2", math_ops.atan2), ("Complex", math_ops.complex), ("DivNoNan", math_ops.div_no_nan), ("Equal", math_ops.equal), ("FloorDiv", math_ops.floor_div), ("FloorMod", math_ops.floor_mod), ("Greater", math_ops.greater), ("GreaterEqual", math_ops.greater_equal), ("Igamma", math_ops.igamma), ("Igammac", math_ops.igammac), ("IgammaGradA", math_ops.igamma_grad_a), ("Less", math_ops.less), ("LessEqual", math_ops.less_equal), ("Maximum", math_ops.maximum), ("Minimum", math_ops.minimum), ("Mod", math_ops.mod), ("Mul", math_ops.multiply), ("NotEqual", math_ops.not_equal), ("Polygamma", safe_polygamma), ("Pow", math_ops.pow), ("RealDiv", math_ops.divide), ("SquareDifference", math_ops.squared_difference), ("Sub", math_ops.subtract), ("TruncateMod", math_ops.truncate_mod), ("Zeta", safe_zeta), ] # Exercises broadcasting capabilities x = np.random.rand(7, 3, 5) y = np.random.rand(3, 5) x_int = np.random.randint(0, 10, (7, 3, 5)) y_int = np.random.randint(0, 10, (3, 5)) def bitwise_dataset_factory(): return dataset_ops.Dataset.from_tensors((x_int, y_int)) def logical_dataset_factory(): return dataset_ops.Dataset.from_tensors((x > 0, y > 0)) def random_dataset_factory(): return dataset_ops.Dataset.from_tensors((x, y)) case_factory_pairs = [ (bitwise_cases, bitwise_dataset_factory), (logical_cases, logical_dataset_factory), (real_cases, random_dataset_factory), ] return [(case[0], case[1], factory) for cases, factory in case_factory_pairs for case in cases] def _generate_cwise_test_cases(): return _generate_unary_cwise_math_cases() + _generate_binary_cwise_math_cases( ) def _generate_csv_test_case(): def csv_factory(): return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a", "2.4:5:c"]).repeat(5) def decode_csv_fn(x): return parsing_ops.decode_csv( x, record_defaults=[ constant_op.constant([], dtypes.float32), constant_op.constant([], dtypes.int32), constant_op.constant([], dtypes.string) ], field_delim=":") return decode_csv_fn, csv_factory def _generate_parse_single_example_test_case(): # When sparse tensors are used, map_vectorization is not # attempted because the output_shapes of the map dataset are not defined. # TODO(rachelim): Consider being more lax with checking the output_shapes of # the map node. 
def parse_example_factory(): def _int64_feature(*values): return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values)) def _bytes_feature(*values): return feature_pb2.Feature( bytes_list=feature_pb2.BytesList( value=[v.encode("utf-8") for v in values])) return dataset_ops.Dataset.from_tensor_slices( constant_op.constant([ example_pb2.Example( features=feature_pb2.Features( feature={ "dense_int": _int64_feature(i), "dense_str": _bytes_feature(str(i)), })).SerializeToString() for i in range(10) ])) def parse_single_example_fn(x): features = { "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0), "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""), } return parsing_ops.parse_single_example(x, features) return parse_single_example_fn, parse_example_factory def _generate_optimization_test_cases(): def base_dataset_factory(): return dataset_ops.Dataset.from_tensors(np.random.rand(10, 3)).repeat(5) rand_val = np.random.rand(1, 1, 1, 1, 1, 1) csv_test_case = _generate_csv_test_case() parse_fn, parse_base = _generate_parse_single_example_test_case() def dense_output_only_parse_fn(x): # Since we haven't implemented a vectorizer for SerializeSparse, any # function with sparse outputs will only be naively vectorized. parse_result = parse_fn(x) return [ y for y in parse_result if not isinstance(y, sparse_tensor.SparseTensor) ] def map_fn_with_cycle(x): c = lambda i: math_ops.less(i, 10) b = lambda i: math_ops.add(i, 1) return control_flow_ops.while_loop(c, b, [x]) # Misc test cases test_cases = [ ("Basic", lambda x: (x, x + 1), base_dataset_factory), ("Broadcast", lambda x: x + rand_val, base_dataset_factory), ("Cycle", map_fn_with_cycle, lambda: dataset_ops.Dataset.from_tensors(1)), ("Const", lambda x: 2, base_dataset_factory), ("Cast", lambda x: math_ops.cast(x, dtypes.float64), base_dataset_factory), ("Reshape", lambda x: array_ops.reshape(x, (-1, 30)), base_dataset_factory), ("Transpose", array_ops.transpose, base_dataset_factory), ("Unpack", array_ops.unstack, base_dataset_factory), ("UnpackNegativeAxis", lambda x: array_ops.unstack(x, axis=-1), base_dataset_factory), # Parsing ops ("DecodeCSV", csv_test_case[0], csv_test_case[1]), ("ParseSingleExample", parse_fn, parse_base), ("ParseSingleExampleDenseOutputOnly", dense_output_only_parse_fn, parse_base), ] + _generate_cwise_test_cases() return [{ "testcase_name": x[0] + "Parallel" if num_parallel_calls is not None else x[0], "map_fn": x[1], "base_dataset_factory": x[2], "num_parallel_calls": num_parallel_calls } for x in test_cases for num_parallel_calls in (None, 12)] @test_util.run_all_in_graph_and_eager_modes class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase): def _enable_map_vectorization(self, dataset, use_choose=True): options = dataset_ops.Options() opt_options = options.experimental_optimization opt_options.map_vectorization.enabled = True opt_options.map_vectorization.use_choose_fastest = use_choose return dataset.with_options(options) def _get_test_datasets(self, base_dataset, map_fn, num_parallel_calls=None, expect_optimized=True): """Given base dataset and map fn, creates test datasets. Returns a tuple of (unoptimized dataset, optimized dataset). The unoptimized dataset has the assertion that Batch follows Map. The optimized dataset has the assertion that Map follows Batch, and has the "map_vectorization" optimization applied. Args: base_dataset: Input dataset to map->batch map_fn: Map function to use num_parallel_calls: (Optional.) 
num_parallel_calls argument for map expect_optimized: (Optional.) Whether we expect the optimization to take place, in which case we will assert that Batch is followed by Map, otherwise Map followed by Batch. Defaults to True. Returns: Tuple of (unoptimized dataset, optimized dataset). """ map_node_name = "Map" if num_parallel_calls is None else "ParallelMap" def _make_dataset(node_names): dataset = base_dataset.apply(optimization.assert_next(node_names)) dataset = dataset.map(map_fn, num_parallel_calls) dataset = dataset.batch(100) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.map_and_batch_fusion = False dataset = dataset.with_options(options) return dataset unoptimized = _make_dataset([map_node_name, "BatchV2"]) # Note that because of the `ChooseDataset` fork, we can't use `assert_next` # to verify the optimization result. optimized = _make_dataset(["ChooseFastestBranch"] if expect_optimized else [map_node_name, "BatchV2"]) optimized = self._enable_map_vectorization(optimized) return unoptimized, optimized @parameterized.named_parameters(_generate_optimization_test_cases()) def testOptimization(self, map_fn, base_dataset_factory, num_parallel_calls): base_dataset = base_dataset_factory() unoptimized, optimized = self._get_test_datasets(base_dataset, map_fn, num_parallel_calls) self.assertDatasetsEqual(unoptimized, optimized) def testOptimizationBadMapFn(self): # Test map functions that give an error def map_fn(x): # x has leading dimension 5, this will raise an error return array_ops.gather(x, 10) with self.assertRaisesRegexp(errors.InvalidArgumentError, r"indices = 10 is not in \[0, 5\)"): base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch( 5, drop_remainder=True) _, optimized = self._get_test_datasets(base_dataset, map_fn) nxt = dataset_ops.make_one_shot_iterator(optimized).get_next() self.evaluate(nxt) def testOptimizationWithCapturedInputs(self): # Tests that vectorization works with captured inputs. y = constant_op.constant(1, shape=(2,)) z = constant_op.constant(2, shape=(2,)) def map_fn(x): return x, y, z base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2], [3, 4]]).repeat(5) unoptimized, optimized = self._get_test_datasets( base_dataset, map_fn, expect_optimized=True) self.assertDatasetsEqual(optimized, unoptimized) def testOptimizationWithMapAndBatchFusion(self): # Tests that vectorization works on fused map and batch. def map_fn(x): return x**2 base_dataset = dataset_ops.Dataset.range(1000) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False base_dataset = base_dataset.with_options(options) def _make_dataset(node_names): dataset = base_dataset.apply(optimization.assert_next(node_names)) dataset = dataset.apply(batching.map_and_batch(map_fn, 100)) return dataset unoptimized = _make_dataset(["MapAndBatch"]) optimized = _make_dataset(["ChooseFastestBranch"]) optimized = self._enable_map_vectorization(optimized) self.assertDatasetsEqual(optimized, unoptimized) @parameterized.named_parameters( ("1", True, True), ("2", True, False), ("3", False, True), ("4", False, False), ) def testOptimizationWithChainedMapAndBatch(self, fuse_first, fuse_second): # Tests that vectorization works on chained map and batch functions. 
def map_fn(x): return x * 2 unoptimized_seq = [] def make_apply_fn(is_fused): if is_fused: unoptimized_seq.append("MapAndBatch") def apply_fn(dataset): return dataset.apply( batching.map_and_batch(map_fn, 2, 12, drop_remainder=True)) return apply_fn else: unoptimized_seq.extend(["ParallelMap", "BatchV2"]) def apply_fn(dataset): return dataset.map(map_fn, 12).batch(2, drop_remainder=True) return apply_fn base_dataset = dataset_ops.Dataset.range(1000) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False base_dataset = base_dataset.with_options(options) apply_fn_1 = make_apply_fn(fuse_first) apply_fn_2 = make_apply_fn(fuse_second) def make_dataset(node_names): dataset = base_dataset.apply(optimization.assert_next(node_names)) dataset = apply_fn_1(dataset) dataset = apply_fn_2(dataset) return dataset unoptimized = make_dataset(unoptimized_seq) optimized = make_dataset(["ChooseFastestBranch", "ChooseFastestBranch"]) optimized = self._enable_map_vectorization(optimized) self.assertDatasetsEqual(optimized, unoptimized) def testOptimizationIgnoreStateful(self): def map_fn(x): with ops.control_dependencies([check_ops.assert_equal(x, np.int64(0))]): return array_ops.identity(x) dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(map_fn) dataset = dataset.batch(10) dataset = self._enable_map_vectorization(dataset, use_choose=False) with self.assertRaises(errors.InvalidArgumentError): get_next = self.getNext(dataset) self.evaluate(get_next()) def testOptimizationIgnoreRagged(self): # Make sure we ignore inputs that might not be uniformly sized def map_fn(x): return array_ops.gather(x, np.int64(0)) # output_shape = (?,) base_dataset = dataset_ops.Dataset.range(20).batch(3, drop_remainder=False) unoptimized, optimized = self._get_test_datasets( base_dataset, map_fn, expect_optimized=False) self.assertDatasetsEqual(unoptimized, optimized) def testOptimizationIgnoreRaggedMap(self): # Don't optimize when the output of the map fn shapes are unknown. def map_fn(x): return array_ops.tile(x, x) dataset = dataset_ops.Dataset.range(10).batch(1) dataset = dataset.map(map_fn) dataset = dataset.batch(10) dataset = self._enable_map_vectorization(dataset, use_choose=False) with self.assertRaises(errors.InvalidArgumentError): get_next = self.getNext(dataset) self.evaluate(get_next()) def testOptimizationWithUnknownBatchShape(self): tensor = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) # Datasets with sparse tensors have unknown output shapes. base_dataset = dataset_ops.Dataset.from_tensors(tensor) unoptimized = base_dataset.apply(batching.map_and_batch(lambda x: x, 2)) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False unoptimized = unoptimized.with_options(options) optimized = self._enable_map_vectorization(unoptimized) self.assertDatasetsEqual(unoptimized, optimized) def testOptimizationWithSparseTensor(self): base_dataset = dataset_ops.Dataset.from_tensors(0) def map_fn(x): del x return sparse_tensor.SparseTensor( indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]) # Datasets with sparse tensors have unknown output shapes. 
unoptimized = base_dataset.apply(batching.map_and_batch(map_fn, 2)) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False unoptimized = unoptimized.with_options(options) optimized = self._enable_map_vectorization(unoptimized) self.assertDatasetsEqual(unoptimized, optimized) def testOptimizationWithPrefetch(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(lambda x: x) dataset = dataset.prefetch(1) dataset = dataset.batch(10) dataset = self._enable_map_vectorization(dataset) self.assertDatasetProduces(dataset, [list(range(10))]) def testOptimizationWithoutChooseFastest(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.map(lambda x: x**2) dataset = dataset.batch(10) dataset = self._enable_map_vectorization(dataset, use_choose=False) self.assertDatasetProduces(dataset, [[x**2 for x in range(10)]]) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
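A sketch of enabling the vectorization rewrite on a map->batch pipeline, using the same nested option fields the test's helper sets (pipeline sizes are illustrative):

from tensorflow.python.data.ops import dataset_ops

# The rewrite swaps map->batch into batch followed by a vectorized map; with
# use_choose_fastest the rewritten branch is raced against the original.
dataset = dataset_ops.Dataset.range(1000).map(lambda x: x * 2).batch(100)

options = dataset_ops.Options()
options.experimental_optimization.map_vectorization.enabled = True
options.experimental_optimization.map_vectorization.use_choose_fastest = True
dataset = dataset.with_options(options)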
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the OptimizeDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class OptimizeDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def testCore(self): def build_dataset(num_elements, batch_size): return dataset_ops.Dataset.range(num_elements).map(lambda x: x * x).batch( batch_size).apply(optimization.optimize(["map_and_batch_fusion"])) self.run_core_tests(lambda: build_dataset(200, 10), 20) def testWithNewFunction(self): """Tests that optimized datasets with new functions work.""" def build_dataset(): dataset = dataset_ops.Dataset.range(100) dataset = dataset.map(lambda x: x) dataset = dataset.batch(5) # map_vectorization adds a new vectorized function to the function # library. dataset = dataset.apply(optimization.optimize(["map_vectorization"])) return dataset self.run_core_tests(build_dataset, 20) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/optimize_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the UniqueDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class UniqueDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def testUnique(self):

    def build_dataset(num_elements, unique_elem_range):
      return dataset_ops.Dataset.range(num_elements).map(
          lambda x: x % unique_elem_range).apply(unique.unique())

    self.run_core_tests(lambda: build_dataset(200, 100), 100)


if __name__ == "__main__":
  test.main()
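# Added note (not part of the original TensorFlow test; illustrative only):
# mapping range(200) through x % 100 produces each residue 0..99 exactly twice,
# and unique.unique() keeps only the first occurrence, leaving 100 distinct
# elements -- the expected-output count passed to run_core_tests above.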
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the dataset constructors serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.platform import test class FromTensorsSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_tensor_dataset(self, variable_array): components = (variable_array, np.array([1, 2, 3]), np.array(37.0)) return dataset_ops.Dataset.from_tensors(components) def testFromTensorsCore(self): # Equal length components arr = np.array(1) num_outputs = 1 self.run_core_tests(lambda: self._build_tensor_dataset(arr), num_outputs) class FromTensorSlicesSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_tensor_slices_dataset(self, components): return dataset_ops.Dataset.from_tensor_slices(components) def testFromTensorSlicesCore(self): # Equal length components components = (np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array([37.0, 38.0, 39.0, 40.0])) dict_components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]} self.run_core_tests(lambda: self._build_tensor_slices_dataset(components), 4) self.run_core_tests( lambda: self._build_tensor_slices_dataset(dict_components), 3) class FromSparseTensorSlicesSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_sparse_tensor_slice_dataset(self, slices): indices = np.array( [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))], dtype=np.int64) values = np.array([val for s in slices for val in s], dtype=np.float64) dense_shape = np.array( [len(slices), max(len(s) for s in slices) + 1], dtype=np.int64) sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape) return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components) def testFromSparseTensorSlicesCore(self): slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []] self.run_core_tests( lambda: self._build_sparse_tensor_slice_dataset(slices), 9, sparse_tensors=True) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_constructor_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GroupByReducer serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class GroupByReducerSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_dataset(self, components):
    reducer = grouping.Reducer(
        init_func=lambda _: np.int64(0),
        reduce_func=lambda x, y: x + y,
        finalize_func=lambda x: x)

    return dataset_ops.Dataset.from_tensor_slices(components).apply(
        grouping.group_by_reducer(lambda x: x % 5, reducer))

  def testCoreGroupByReducer(self):
    components = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
    self.verify_unused_iterator(
        lambda: self._build_dataset(components), 5, verify_exhausted=True)
    self.verify_multiple_breaks(
        lambda: self._build_dataset(components), 5, verify_exhausted=True)
    self.verify_reset_restored_iterator(
        lambda: self._build_dataset(components), 5, verify_exhausted=True)


if __name__ == '__main__':
  test.main()
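# Added note (not part of the original TensorFlow test; illustrative only):
# keying range(10) by x % 5 gives five groups, each containing k and k + 5, so
# the summing reducer above emits one element per key with value 2 * k + 5:
#   expected_sums = {k: 2 * k + 5 for k in range(5)}  # {0: 5, 1: 7, 2: 9, 3: 11, 4: 13}
# which is why the serialization checks expect 5 outputs.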
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/group_by_reducer_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ParseExampleDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.platform import test class ParseExampleDatasetSerializationTest( reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase, dataset_serialization_test_base.DatasetSerializationTestBase): def ParseExampleDataset(self, num_repeat, batch_size): return self.make_batch_feature( filenames=self.test_filenames, num_epochs=num_repeat, batch_size=batch_size, reader_num_threads=5, parser_num_threads=10) def testSerializationCore(self): num_repeat = 5 batch_size = 2 num_outputs = self._num_records * self._num_files * num_repeat // batch_size # pylint: disable=g-long-lambda self.run_core_tests( lambda: self.ParseExampleDataset( num_repeat=num_repeat, batch_size=batch_size), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/parse_example_dataset_serialization_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TakeWhileDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class TakeWhileDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase,
    parameterized.TestCase):

  def _build_dataset(self, num_elements, upper_bound):
    return dataset_ops.Dataset.range(num_elements).apply(
        take_while_ops.take_while(lambda x: x < upper_bound))

  @parameterized.parameters((23, 7), (10, 0), (25, 25))
  def testCore(self, num_elem, upper_bound):
    self.run_core_tests(lambda: self._build_dataset(num_elem, upper_bound),
                        upper_bound)


if __name__ == "__main__":
  test.main()
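# Added note (not part of the original TensorFlow test; illustrative only):
# take_while(lambda x: x < upper_bound) over Dataset.range(num_elements) keeps
# only the leading run of elements below the bound, e.g. (23, 7) yields
# [0, 1, 2, 3, 4, 5, 6]. In every parameterized case the run length equals
# upper_bound, which is why run_core_tests receives `upper_bound` as the
# expected number of outputs.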
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/take_while_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the MapDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test class MapDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def setUp(self): self._tensor_slice_len = 7 self._num_epochs = 14 self._num_outputs = self._tensor_slice_len * self._num_epochs def _build_ds(self, multiplier=37.0): components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) * np.arange(self._tensor_slice_len)[:, np.newaxis], np.array(multiplier) * np.arange(self._tensor_slice_len)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) return ( dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn) .repeat(self._num_epochs)) def testSaveRestoreCore(self): self.run_core_tests(self._build_ds, self._num_outputs) def testSaveStatefulFunction(self): def _build_ds(): def _map_fn(x): return random_ops.random_uniform( (), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32) return dataset_ops.Dataset.range(100).map(_map_fn) self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError) def testCaptureVariableInMapFn(self): def _build_ds(): counter_var = variable_scope.get_variable( "counter", (), dtypes.int32, use_resource=True) return (dataset_ops.Dataset.from_tensors(0).repeat(10).map( lambda _: counter_var.assign_add(1))) self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError) def testCaptureConstantInMapFn(self): def _build_ds(): constant_var = constant_op.constant(5) return (dataset_ops.Dataset.from_tensors(0).repeat(10).map( lambda x: x + constant_var)) self.run_core_tests(_build_ds, 10) def testCaptureDefunInMapFn(self): num_outputs = 100 def _build_ds(): @function.Defun(dtypes.int64) def defun_fn(x): return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32) return dataset_ops.Dataset.range(num_outputs).map(defun_fn) self.run_core_tests(_build_ds, num_outputs) def testBuildDefunInMapFn(self): num_outputs = 100 def _build_ds(): @function.Defun(dtypes.int64) def defun_fn(x): @function.Defun(dtypes.int32) def defun_fn_deep(x): return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32) return constant_op.constant(11000) + 
defun_fn_deep( math_ops.cast(x, dtypes.int32)) return dataset_ops.Dataset.range(num_outputs).map(defun_fn) self.run_core_tests(_build_ds, num_outputs) def testSparseCore(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=np.array([[0, 0]]), values=(i * np.array([1])), dense_shape=np.array([1, 1])) def _build_ds(num_outputs): return dataset_ops.Dataset.range(num_outputs).map(_sparse) num_outputs = 10 self.run_core_tests(lambda: _build_ds(num_outputs), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/map_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the FilterDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class FilterDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_filter_range_graph(self, div): return dataset_ops.Dataset.range(100).filter( lambda x: math_ops.not_equal(math_ops.mod(x, div), 2)) def testFilterCore(self): div = 3 num_outputs = sum(x % 3 != 2 for x in range(100)) self.run_core_tests(lambda: self._build_filter_range_graph(div), num_outputs) def _build_filter_dict_graph(self): return dataset_ops.Dataset.range(10).map( lambda x: {"foo": x * 2, "bar": x ** 2}).filter( lambda d: math_ops.equal(d["bar"] % 2, 0)).map( lambda d: d["foo"] + d["bar"]) def testFilterDictCore(self): num_outputs = sum((x**2) % 2 == 0 for x in range(10)) self.run_core_tests(self._build_filter_dict_graph, num_outputs) def _build_sparse_filter(self): def _map_fn(i): return sparse_tensor.SparseTensor( indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i def _filter_fn(_, i): return math_ops.equal(i % 2, 0) return dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map( lambda x, i: x) def testSparseCore(self): num_outputs = 5 self.run_core_tests(self._build_sparse_filter, num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/filter_dataset_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the UnbatchDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import batching from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class UnbatchDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2): components = ( np.arange(tensor_slice_len), np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis], np.array(multiplier) * np.arange(tensor_slice_len)) return dataset_ops.Dataset.from_tensor_slices(components).batch( batch_size).apply(batching.unbatch()) def testCore(self): tensor_slice_len = 8 batch_size = 2 num_outputs = tensor_slice_len self.run_core_tests( lambda: self.build_dataset(15.0, tensor_slice_len, batch_size), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/unbatch_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ShuffleDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools from absl.testing import parameterized from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import ops from tensorflow.python.platform import test from tensorflow.python.training import saver as saver_lib class ShuffleDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase, parameterized.TestCase): def _build_shuffle_dataset( self, range_limit=10, num_repeats=5, buffer_size=5, seed=None, reshuffle_each_iteration=None, ): dataset = dataset_ops.Dataset.range(range_limit).shuffle( buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration).repeat(num_repeats) # TODO(b/138399725): Re-enable default optimizations. options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False return dataset.with_options(options) @parameterized.parameters(itertools.product([True, False], [1, 3, 5, 8, 10])) def testShuffleCore(self, reshuffle_each_iteration, buffer_size): seed = 55 range_limit = 5 num_repeats = 2 num_outputs = range_limit * num_repeats # pylint: disable=g-long-lambda self.run_core_tests( lambda: self._build_shuffle_dataset( range_limit=range_limit, num_repeats=num_repeats, buffer_size=buffer_size, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration), num_outputs) # TODO(b/133780904): Re-enable this test once randomness state is hoisted out # of the input pipeline. @parameterized.parameters(itertools.product([True, False], [1, 3, 5, 8, 10])) def _testNonDeterministicSeeding(self, reshuffle_each_iteration, buffer_size): range_limit = 5 num_repeats = 2 num_outputs = range_limit * num_repeats def ds_fn(): # pylint: disable=cell-var-from-loop return self._build_shuffle_dataset( range_limit=range_limit, num_repeats=num_repeats, buffer_size=buffer_size, seed=None, # Iterator seeds are generated non-deterministically. reshuffle_each_iteration=reshuffle_each_iteration) # pylint: enable=cell-var-from-loop # We checkpoint the initial state of the Dataset so that we can restore # the seeds in the next run. Since the seeding is non-deterministic # the dataset gets initialized with different seeds each time. 
expected = self.gen_outputs( ds_fn, break_points=[0], num_outputs=num_outputs, ckpt_saved=False, verify_exhausted=False, save_checkpoint_at_end=False) actual = self.gen_outputs( ds_fn, break_points=self.gen_break_points(num_outputs), num_outputs=num_outputs, ckpt_saved=True, verify_exhausted=False) self.match(expected, actual) @parameterized.parameters(itertools.product([True, False], [1, 3, 5, 8, 10])) def testMultipleIterators(self, reshuffle_each_iteration, buffer_size): range_limit = 5 num_repeats = 2 num_outputs = range_limit * num_repeats def ds_fn(): # pylint: disable=cell-var-from-loop return self._build_shuffle_dataset( range_limit=range_limit, num_repeats=num_repeats, buffer_size=buffer_size, seed=None, # Iterator seeds are generated non-deterministically. reshuffle_each_iteration=reshuffle_each_iteration) # pylint: enable=cell-var-from-loop with ops.Graph().as_default() as g: ds = ds_fn() iterators = [ds.make_one_shot_iterator(), ds.make_one_shot_iterator()] get_next_ops = [it.get_next() for it in iterators] saveables = [ contrib_iterator_ops.make_saveable_from_iterator(it) for it in iterators ] for saveable in saveables: ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) saver = saver_lib.Saver(allow_empty=True) with self.session(graph=g) as sess: self._save(sess, saver) expected = [self.evaluate(get_next_ops) for _ in range(num_outputs)] self._restore(saver, sess) actual = [self.evaluate(get_next_ops) for _ in range(num_outputs)] self.match(expected, actual) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ParallelMapDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test class ParallelMapDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def setUp(self): self._tensor_slice_len = 7 self._num_epochs = 1 self._num_outputs = self._tensor_slice_len * self._num_epochs def _build_ds(self, multiplier=37.0): components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) * np.arange(self._tensor_slice_len)[:, np.newaxis], np.array(multiplier) * np.arange(self._tensor_slice_len)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) return (dataset_ops.Dataset.from_tensor_slices(components).map( _map_fn, num_parallel_calls=3).repeat(self._num_epochs)) def _build_ds_with_prefetch(self, multiplier=37.0): components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) * np.arange(self._tensor_slice_len)[:, np.newaxis], np.array(multiplier) * np.arange(self._tensor_slice_len)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) return (dataset_ops.Dataset.from_tensor_slices(components).map( _map_fn, num_parallel_calls=3).repeat(self._num_epochs).prefetch(5)) def testSaveRestoreCore(self): for ds_fn in [self._build_ds, self._build_ds_with_prefetch]: self.run_core_tests(ds_fn, self._num_outputs) def testSaveStatefulFunction(self): def _build_ds(): def _map_fn(x): return random_ops.random_uniform( (), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32) return dataset_ops.Dataset.range(100).map( _map_fn, num_parallel_calls=2).prefetch(2) self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError) def testCaptureVariableInMapFn(self): def _build_ds(): counter_var = variable_scope.get_variable( "counter", (), dtypes.int32, use_resource=True) return (dataset_ops.Dataset.from_tensors(0).repeat(10).map( lambda _: counter_var.assign_add(1), num_parallel_calls=2).prefetch(2)) self.verify_error_on_save(_build_ds, 15, errors.FailedPreconditionError) def testCaptureConstantInMapFn(self): def _build_ds(): constant_var = constant_op.constant(5) return (dataset_ops.Dataset.from_tensors(0).repeat(10).map( lambda x: x + constant_var, num_parallel_calls=2).prefetch(2)) 
self.run_core_tests(_build_ds, 10) def testCaptureDefunInMapFn(self): num_outputs = 100 def _build_ds(): @function.Defun(dtypes.int64) def defun_fn(x): return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32) return dataset_ops.Dataset.range(num_outputs).map( defun_fn, num_parallel_calls=2).prefetch(2) self.run_core_tests(_build_ds, num_outputs) def testBuildDefunInMapFn(self): num_outputs = 100 def _build_ds(): @function.Defun(dtypes.int64) def defun_fn(x): @function.Defun(dtypes.int32) def defun_fn_deep(x): return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32) return constant_op.constant(11000) + defun_fn_deep( math_ops.cast(x, dtypes.int32)) return dataset_ops.Dataset.range(num_outputs).map( defun_fn, num_parallel_calls=2).prefetch(2) self.run_core_tests(_build_ds, num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/parallel_map_dataset_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the SqlDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import readers from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class SqlDatasetSerializationTest( sql_dataset_test_base.SqlDatasetTestBase, dataset_serialization_test_base.DatasetSerializationTestBase): def _build_dataset(self, num_repeats): data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite") driver_name = array_ops.placeholder_with_default( array_ops.constant("sqlite", dtypes.string), shape=[]) query = ("SELECT first_name, last_name, motto FROM students ORDER BY " "first_name DESC") output_types = (dtypes.string, dtypes.string, dtypes.string) return readers.SqlDataset(driver_name, data_source_name, query, output_types).repeat(num_repeats) def testSQLSaveable(self): num_repeats = 4 num_outputs = num_repeats * 2 self.run_core_tests(lambda: self._build_dataset(num_repeats), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/sql_dataset_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Integration test for dataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import test from tensorflow.python.training import saver as saver_lib class SerializationIntegrationTest(test.TestCase): def _build_input_pipeline(self, name, num_outputs): with ops.name_scope(name): ds = dataset_ops.Dataset.range(num_outputs).shuffle( 10, reshuffle_each_iteration=False).prefetch(10) iterator = ds.make_initializable_iterator() saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator) ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable) return iterator.initializer, iterator.get_next() def _build_graph(self, num_pipelines, num_outputs): init_ops = [] get_next_ops = [] for i in range(num_pipelines): name = "input_pipeline_%d" % i init_op, get_next_op = self._build_input_pipeline(name, num_outputs) init_ops.append(init_op) get_next_ops.append(get_next_op) saver = saver_lib.Saver() return init_ops, get_next_ops, saver def _ckpt_path(self): return os.path.join(self.get_temp_dir(), "iterator") def testConcurrentSaves(self): num_pipelines = 100 num_outputs = 100 break_point = 10 all_outputs = [[] for _ in range(num_pipelines)] with ops.Graph().as_default() as g: init_ops, get_next_ops, saver = self._build_graph(num_pipelines, num_outputs) with self.session(graph=g) as sess: self.evaluate(init_ops) for _ in range(break_point): output = self.evaluate(get_next_ops) for i in range(num_pipelines): all_outputs[i].append(output[i]) saver.save(sess, self._ckpt_path()) with ops.Graph().as_default() as g: init_ops, get_next_ops, saver = self._build_graph(num_pipelines, num_outputs) with self.session(graph=g) as sess: self.evaluate(init_ops) saver.restore(sess, self._ckpt_path()) for _ in range(num_outputs - break_point): output = self.evaluate(get_next_ops) for i in range(num_pipelines): all_outputs[i].append(output[i]) for output in all_outputs: self.assertSequenceEqual(sorted(output), range(num_outputs)) def testUninitializedIterator(self): num_pipelines = 1 num_outputs = 1 with ops.Graph().as_default() as g: _, _, saver = self._build_graph(num_pipelines, num_outputs) with self.session(graph=g) as sess: with self.assertRaises(errors.FailedPreconditionError): saver.save(sess, self._ckpt_path()) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/serialization_integration_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the StatsDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import stats_aggregator from tensorflow.python.data.experimental.ops import stats_ops from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.platform import test # TODO(b/116814321): Can not checkpoint input_pipeline with the # transformation `stats_ops.set_stats_aggregator`, since we don't support # saving/restoring resources (StatsAggregator in this case) yet. class StatsDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_dataset_bytes_stats(self, num_elements): return dataset_ops.Dataset.range(num_elements).map( lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply( stats_ops.bytes_produced_stats("bytes_produced")) def test_bytes_produced_stats_invalid_tag_shape(self): with self.assertRaisesRegexp( ValueError, "Shape must be rank 0 but is rank 1"): # pylint: disable=g-long-lambda self.run_core_tests( lambda: dataset_ops.Dataset.range(100).apply( stats_ops.bytes_produced_stats(["bytes_produced"])), 100) # pylint: enable=g-long-lambda def testBytesStatsDatasetSaveableCore(self): num_outputs = 100 self.run_core_tests(lambda: self._build_dataset_bytes_stats(num_outputs), num_outputs) def _build_dataset_latency_stats(self, num_elements, tag="record_latency"): return dataset_ops.Dataset.range(num_elements).apply( stats_ops.latency_stats(tag)) def _build_dataset_multiple_tags(self, num_elements, tag1="record_latency", tag2="record_latency_2"): return dataset_ops.Dataset.range(num_elements).apply( stats_ops.latency_stats(tag1)).apply(stats_ops.latency_stats(tag2)) def test_latency_stats_invalid_tag_shape(self): with self.assertRaisesRegexp( ValueError, "Shape must be rank 0 but is rank 1"): # pylint: disable=g-long-lambda self.run_core_tests( lambda: dataset_ops.Dataset.range(100).apply( stats_ops.latency_stats(["record_latency", "record_latency_2"])), 100) # pylint: enable=g-long-lambda def testLatencyStatsDatasetSaveableCore(self): num_outputs = 100 self.run_core_tests(lambda: self._build_dataset_latency_stats(num_outputs), num_outputs) self.run_core_tests(lambda: self._build_dataset_multiple_tags(num_outputs), num_outputs) tag1 = "record_latency" tag2 = "record_latency" self.run_core_tests( lambda: self._build_dataset_multiple_tags(num_outputs, tag1, tag2), num_outputs) def _build_dataset_stats_aggregator(self): aggregator = stats_aggregator.StatsAggregator() return dataset_ops.Dataset.range(10).apply( 
stats_ops.set_stats_aggregator(aggregator)) def test_set_stats_aggregator_not_support_checkpointing(self): with self.assertRaisesRegexp(errors.UnimplementedError, "does not support checkpointing"): self.run_core_tests(self._build_dataset_stats_aggregator, 10) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class ChooseFastestDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def testCore(self):

    num_outputs = 10
    batch_size = 2

    def build_ds():
      dataset = dataset_ops.Dataset.range(num_outputs)
      map_fn = lambda x: x * 2
      return optimization._ChooseFastestDataset([  # pylint: disable=protected-access
          dataset.map(map_fn).batch(batch_size),
          dataset.batch(batch_size).map(map_fn)
      ])

    self.run_core_tests(build_ds, num_outputs // 2)


if __name__ == "__main__":
  test.main()
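# Added note (not part of the original TensorFlow test; illustrative only):
# both candidate branches batch Dataset.range(10) with batch_size=2, so either
# branch produces 10 // 2 = 5 batches; that is why run_core_tests is passed
# num_outputs // 2 as the expected number of outputs.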
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the RangeDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import gen_dataset_ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class RangeDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _iterator_checkpoint_prefix_local(self): return os.path.join(self.get_temp_dir(), "iterator") def _save_op(self, iterator_resource): iterator_state_variant = gen_dataset_ops.serialize_iterator( iterator_resource) save_op = io_ops.write_file( self._iterator_checkpoint_prefix_local(), parsing_ops.serialize_tensor(iterator_state_variant)) return save_op def _restore_op(self, iterator_resource): iterator_state_variant = parsing_ops.parse_tensor( io_ops.read_file(self._iterator_checkpoint_prefix_local()), dtypes.variant) restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource, iterator_state_variant) return restore_op def testSaveRestore(self): def _build_graph(start, stop): iterator = dataset_ops.make_initializable_iterator( dataset_ops.Dataset.range(start, stop)) init_op = iterator.initializer get_next = iterator.get_next() save_op = self._save_op(iterator._iterator_resource) restore_op = self._restore_op(iterator._iterator_resource) return init_op, get_next, save_op, restore_op # Saving and restoring in different sessions. start = 2 stop = 10 break_point = 5 with ops.Graph().as_default() as g: init_op, get_next, save_op, _ = _build_graph(start, stop) with self.session(graph=g) as sess: self.evaluate(variables.global_variables_initializer()) self.evaluate(init_op) for i in range(start, break_point): self.assertEqual(i, self.evaluate(get_next)) self.evaluate(save_op) with ops.Graph().as_default() as g: init_op, get_next, _, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: self.evaluate(init_op) self.evaluate(restore_op) for i in range(break_point, stop): self.assertEqual(i, self.evaluate(get_next)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next) # Saving and restoring in same session. 
with ops.Graph().as_default() as g: init_op, get_next, save_op, restore_op = _build_graph(start, stop) with self.session(graph=g) as sess: self.evaluate(variables.global_variables_initializer()) self.evaluate(init_op) for i in range(start, break_point): self.assertEqual(i, self.evaluate(get_next)) self.evaluate(save_op) self.evaluate(restore_op) for i in range(break_point, stop): self.assertEqual(i, self.evaluate(get_next)) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next) def _build_range_dataset(self, start, stop): return dataset_ops.Dataset.range(start, stop) def testRangeCore(self): start = 2 stop = 10 stop_1 = 8 self.run_core_tests(lambda: self._build_range_dataset(start, stop), stop - start) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/range_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShuffleAndRepeatDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class ShuffleAndRepeatSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_ds(self, seed):
    return dataset_ops.Dataset.range(20).apply(
        shuffle_ops.shuffle_and_repeat(buffer_size=5, count=5, seed=seed))

  def testCore(self):
    self.run_core_tests(lambda: self._build_ds(10), 100)


if __name__ == "__main__":
  test.main()
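# Added note (not part of the original TensorFlow test; illustrative only):
# shuffle_and_repeat(buffer_size=5, count=5) over Dataset.range(20) emits
# 20 * 5 = 100 elements in total, which is the expected-output count passed to
# run_core_tests above.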
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_and_repeat_dataset_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the GroupByWindow serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.experimental.ops import grouping from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class GroupByWindowSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_dataset(self, components): return dataset_ops.Dataset.from_tensor_slices(components).repeat(-1).apply( grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4), 4)) def testCoreGroupByWindow(self): components = np.array( [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64) self.verify_unused_iterator( lambda: self._build_dataset(components), 12, verify_exhausted=False) self.verify_multiple_breaks( lambda: self._build_dataset(components), 12, verify_exhausted=False) self.verify_reset_restored_iterator( lambda: self._build_dataset(components), 12, verify_exhausted=False) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/group_by_window_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ConcatenateDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class ConcatenateDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_concatenate_dataset(self, var_array): input_components = (np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 4)) to_concatenate_components = (np.tile( np.array([[5], [6], [7], [8], [9]]), 20), var_array) return dataset_ops.Dataset.from_tensor_slices(input_components).concatenate( dataset_ops.Dataset.from_tensor_slices(to_concatenate_components)) def testConcatenateCore(self): num_outputs = 9 array = np.tile(np.array([[16], [17], [18], [19], [20]]), 15) self.run_core_tests(lambda: self._build_concatenate_dataset(array), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/concatenate_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ScanDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class ScanDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_dataset(self, num_elements):
    return dataset_ops.Dataset.from_tensors(1).repeat(num_elements).apply(
        scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1])))

  def testScanCore(self):
    num_output = 5
    self.run_core_tests(lambda: self._build_dataset(num_output), num_output)


if __name__ == "__main__":
  test.main()
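# Added note (not part of the original TensorFlow test; illustrative only):
# the scan above threads a two-element state [a, b] -> [b, a + b] and emits b,
# so the first five outputs form the Fibonacci prefix 1, 1, 2, 3, 5. A minimal
# pure-Python sketch of the same recurrence (hypothetical variable names):
#   state, outputs = [0, 1], []
#   for _ in range(5):
#     state, out = [state[1], state[0] + state[1]], state[1]
#     outputs.append(out)  # outputs == [1, 1, 2, 3, 5]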
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/scan_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the ZipDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class ZipDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def _build_dataset(self, arr): components = [ np.tile(np.array([[1], [2], [3], [4]]), 20), np.tile(np.array([[12], [13], [14], [15]]), 22), np.array(arr) ] datasets = [ dataset_ops.Dataset.from_tensor_slices(component) for component in components ] return dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2]))) def testCore(self): # Equal length components arr = [37.0, 38.0, 39.0, 40.0] num_outputs = len(arr) self.run_core_tests(lambda: self._build_dataset(arr), num_outputs) # Variable length components diff_size_arr = [1.0, 2.0] self.run_core_tests(lambda: self._build_dataset(diff_size_arr), 2) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the PaddedBatchDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test class PaddedBatchDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def testPaddedBatch(self): def build_dataset(seq_lens): return dataset_ops.Dataset.from_tensor_slices(seq_lens).map( lambda x: array_ops.fill([x], x)).padded_batch( 4, padded_shapes=[-1]) seq_lens = np.random.randint(1, 20, size=(32,)).astype(np.int32) self.run_core_tests(lambda: build_dataset(seq_lens), 8) def testPaddedBatchNonDefaultPadding(self): def build_dataset(seq_lens): def fill_tuple(x): filled = array_ops.fill([x], x) return (filled, string_ops.as_string(filled)) padded_shape = [-1] return dataset_ops.Dataset.from_tensor_slices(seq_lens).map( fill_tuple).padded_batch( 4, padded_shapes=(padded_shape, padded_shape), padding_values=(-1, "<end>")) seq_lens = np.random.randint(1, 20, size=(32,)).astype(np.int32) self.run_core_tests(lambda: build_dataset(seq_lens), 8) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/padded_batch_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the PrefetchDataset serialization.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.platform import test class PrefetchDatasetSerializationTest( dataset_serialization_test_base.DatasetSerializationTestBase): def build_dataset(self, seed): return dataset_ops.Dataset.range(100).prefetch(10).shuffle( buffer_size=10, seed=seed, reshuffle_each_iteration=False) def testCore(self): num_outputs = 100 self.run_core_tests(lambda: self.build_dataset(10), num_outputs) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/prefetch_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the sequence datasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class SkipDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_skip_dataset(self, count):
    components = (np.arange(10),)
    return dataset_ops.Dataset.from_tensor_slices(components).skip(count)

  def testSkipFewerThanInputs(self):
    count = 4
    num_outputs = 10 - count
    self.run_core_tests(lambda: self._build_skip_dataset(count), num_outputs)

  def testSkipVarious(self):
    # Skip more than inputs
    self.run_core_tests(lambda: self._build_skip_dataset(20), 0)
    # Skip exactly the input size
    self.run_core_tests(lambda: self._build_skip_dataset(10), 0)
    self.run_core_tests(lambda: self._build_skip_dataset(-1), 0)
    # Skip nothing
    self.run_core_tests(lambda: self._build_skip_dataset(0), 10)

  def testInvalidSkip(self):
    with self.assertRaisesRegexp(ValueError,
                                 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), 0)


class TakeDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_take_dataset(self, count):
    components = (np.arange(10),)
    return dataset_ops.Dataset.from_tensor_slices(components).take(count)

  def testTakeFewerThanInputs(self):
    count = 4
    self.run_core_tests(lambda: self._build_take_dataset(count), count)

  def testTakeVarious(self):
    # Take more than inputs
    self.run_core_tests(lambda: self._build_take_dataset(20), 10)
    # Take exactly the input size
    self.run_core_tests(lambda: self._build_take_dataset(10), 10)
    # Take all
    self.run_core_tests(lambda: self._build_take_dataset(-1), 10)
    # Take nothing
    self.run_core_tests(lambda: self._build_take_dataset(0), 0)

  def testInvalidTake(self):
    with self.assertRaisesRegexp(ValueError,
                                 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_take_dataset([1, 2]), 0)


class RepeatDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_repeat_dataset(self, count, take_count=3):
    components = (np.arange(10),)
    return dataset_ops.Dataset.from_tensor_slices(components).take(
        take_count).repeat(count)

  def testFiniteRepeat(self):
    count = 10
    self.run_core_tests(lambda: self._build_repeat_dataset(count), 3 * count)

  def testEmptyRepeat(self):
    self.run_core_tests(lambda: self._build_repeat_dataset(0), 0)

  def testInfiniteRepeat(self):
    self.verify_unused_iterator(
        lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
    self.verify_multiple_breaks(
        lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
    self.verify_reset_restored_iterator(
        lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
    # Test repeat empty dataset
    self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), 0)

  def testInvalidRepeat(self):
    with self.assertRaisesRegexp(
        ValueError, 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0), 0)


if __name__ == '__main__':
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the BatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class BatchDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
    components = (
        np.arange(tensor_slice_len),
        np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
        np.array(multiplier) * np.arange(tensor_slice_len))

    return dataset_ops.Dataset.from_tensor_slices(components).batch(batch_size)

  def testCore(self):
    tensor_slice_len = 8
    batch_size = 2
    num_outputs = tensor_slice_len // batch_size
    self.run_core_tests(
        lambda: self.build_dataset(15.0, tensor_slice_len, batch_size),
        num_outputs)

  def _build_dataset_dense_to_sparse(self, components):
    return dataset_ops.Dataset.from_tensor_slices(components).map(
        lambda x: array_ops.fill([x], x)).apply(
            batching.dense_to_sparse_batch(4, [12]))

  def testDenseToSparseBatchDatasetCore(self):
    components = np.random.randint(5, size=(40,)).astype(np.int32)
    num_outputs = len(components) // 4
    self.run_core_tests(
        lambda: self._build_dataset_dense_to_sparse(components), num_outputs)

  def _sparse(self, i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0]], values=(i * [1]), dense_shape=[1])

  def _build_dataset_sparse(self, batch_size=5):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(batch_size)

  def testSparseCore(self):
    self.run_core_tests(self._build_dataset_sparse, 2)

  def _build_dataset_nested_sparse(self):
    return dataset_ops.Dataset.range(10).map(self._sparse).batch(5).batch(2)

  def testNestedSparseCore(self):
    self.run_core_tests(self._build_dataset_nested_sparse, 1)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/batch_dataset_serialization_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the _AutoShardDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat


class AutoShardDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _record(self, f, r):
    return compat.as_bytes("Record %d of file %d" % (r, f))

  def _createFiles(self):
    filenames = []
    for i in range(10):
      fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
      filenames.append(fn)
      writer = python_io.TFRecordWriter(fn)
      for j in range(10):
        writer.write(self._record(i, j))
      writer.close()
    return filenames

  def setUp(self):
    self._filenames = self._createFiles()

  def testCore(self):

    def build_dataset():
      dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
      dataset = dataset.apply(
          interleave_ops.parallel_interleave(readers.TFRecordDataset, 10))
      dataset = distribute._AutoShardDataset(dataset, 5, 3)
      return dataset

    self.run_core_tests(build_dataset, 20)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/auto_shard_dataset_serialization_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShardDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class ShardDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase,
    parameterized.TestCase):

  def _build_dataset(self, num_elements, num_shards, index):
    return dataset_ops.Dataset.range(num_elements).shard(num_shards, index)

  @parameterized.parameters((10, 5, 2), (10, 10, 0), (100, 2, 0))
  def testCore(self, elems, num_shards, index):
    self.run_core_tests(lambda: self._build_dataset(elems, num_shards, index),
                        elems // num_shards)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/shard_dataset_serialization_test.py
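As a quick illustration of what the sharded pipeline above yields, the sketch below uses the public tf.data API; the concrete parameter values are one of the parameterized cases from the test and the comments are explanatory additions, not part of the source file.

# Minimal sketch (assumed values) of Dataset.shard semantics.
import tensorflow as tf

dataset = tf.data.Dataset.range(10).shard(num_shards=5, index=2)
# shard() keeps every element whose position satisfies
# position % num_shards == index, so this dataset yields 2 and 7:
# 10 // 5 == 2 elements, matching elems // num_shards in testCore above.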
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import numpy as np

from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest


def remove_variants(get_next_op):
  # TODO(b/72408568): Remove this once session.run can get variant tensors.
  """Remove variants from a nest structure, so sess.run will execute."""

  def _remove_variant(x):
    if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
      return ()
    else:
      return x

  return nest.map_structure(_remove_variant, get_next_op)


class DatasetSerializationTestBase(test.TestCase):
  """Base class for testing serializable datasets."""

  def tearDown(self):
    self._delete_ckpt()

  # TODO(b/72657739): Remove sparse_tensor argument, which is to test the
  # (deprecated) saveable `SparseTensorSliceDataset`, once the API
  # `from_sparse_tensor_slices()` and related tests are deleted.
  def run_core_tests(self, ds_fn, num_outputs, sparse_tensors=False):
    """Runs the core tests.

    Args:
      ds_fn: 0-argument function that returns a Dataset.
      num_outputs: Total number of outputs expected from this Dataset.
      sparse_tensors: Whether dataset is built from SparseTensor(s).

    Raises:
      AssertionError if any test fails.
    """
    # NOTE: We disable all default optimizations in serialization tests in
    # order to test the actual dataset in question.
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False

    def ds_fn_no_opt():
      return ds_fn().with_options(options)

    self.verify_unused_iterator(
        ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_fully_used_iterator(
        ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_exhausted_iterator(
        ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_multiple_breaks(
        ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_reset_restored_iterator(
        ds_fn_no_opt, num_outputs, sparse_tensors=sparse_tensors)

  def verify_unused_iterator(self,
                             ds_fn,
                             num_outputs,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Verifies that saving and restoring an unused iterator works.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    self.verify_run_with_breaks(
        ds_fn, [0],
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

  def verify_fully_used_iterator(self, ds_fn, num_outputs,
                                 sparse_tensors=False):
    """Verifies that saving and restoring a fully used iterator works.

    Note that this only checks saving and restoring an iterator from which
    `num_outputs` items have been produced but does not check for an
    exhausted iterator, i.e., one from which an OutOfRange error has been
    returned.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.

    Raises:
      AssertionError if test fails.
    """
    self.verify_run_with_breaks(
        ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)

  def verify_exhausted_iterator(self, ds_fn, num_outputs,
                                sparse_tensors=False):
    """Verifies that saving and restoring an exhausted iterator works.

    An exhausted iterator is one which has returned an OutOfRange error.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.

    Raises:
      AssertionError if any test fails.
    """
    self.gen_outputs(
        ds_fn, [],
        num_outputs,
        verify_exhausted=True,
        sparse_tensors=sparse_tensors)
    actual = self.gen_outputs(
        ds_fn, [],
        0,
        ckpt_saved=True,
        verify_exhausted=True,
        sparse_tensors=sparse_tensors)
    self.assertEqual(len(actual), 0)

  def verify_multiple_breaks(self,
                             ds_fn,
                             num_outputs,
                             num_breaks=10,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Attempts to save/restore at multiple break points.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      num_breaks: The number of break points. These are uniformly spread in
        [0, num_outputs] both inclusive.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    self.verify_run_with_breaks(
        ds_fn,
        self.gen_break_points(num_outputs, num_breaks),
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

  def verify_reset_restored_iterator(self,
                                     ds_fn,
                                     num_outputs,
                                     break_point=None,
                                     sparse_tensors=False,
                                     verify_exhausted=True):
    """Attempts to re-initialize a restored iterator.

    This is useful when restoring a training checkpoint during validation.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point

    # Collect ground truth containing all outputs.
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

    # Skip some items and save checkpoint.
    self.gen_outputs(
        ds_fn, [],
        break_point,
        sparse_tensors=sparse_tensors,
        verify_exhausted=False)

    actual = []
    # Restore from checkpoint and then run init_op.
    with ops.Graph().as_default() as g:
      saver = self._import_meta_graph()
      init_op, get_next_op = self._get_iterator_ops_from_collection(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        self._restore(saver, sess)
        self._initialize(init_op, sess)
        for _ in range(num_outputs):
          actual.append(sess.run(get_next_op))
        if verify_exhausted:
          with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next_op)
    self.match(expected, actual)

  def verify_error_on_save(self,
                           ds_fn,
                           num_outputs,
                           error,
                           break_point=None,
                           sparse_tensors=False):
    """Attempts to save a non-saveable iterator.

    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      error: Declared error when trying to save iterator.
      break_point: Break point. Optional. Defaults to num_outputs/2.
      sparse_tensors: See `run_core_tests`.

    Raises:
      AssertionError if any test fails.
    """
    break_point = num_outputs // 2 if not break_point else break_point
    with ops.Graph().as_default() as g:
      init_op, get_next_op, saver = self._build_graph(
          ds_fn, sparse_tensors=sparse_tensors)
      get_next_op = remove_variants(get_next_op)
      with self.session(graph=g) as sess:
        self._initialize(init_op, sess)
        for _ in range(break_point):
          sess.run(get_next_op)
        with self.assertRaises(error):
          self._save(sess, saver)

  def verify_run_with_breaks(self,
                             ds_fn,
                             break_points,
                             num_outputs,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Verifies that ds_fn() produces the same outputs with and without breaks.

    1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       *without* stopping at break points.
    2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
       with stopping at break points.

    Deep matches outputs from 1 and 2.

    Args:
      ds_fn: See `gen_outputs`.
      break_points: See `gen_outputs`.
      num_outputs: See `gen_outputs`.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.

    Raises:
      AssertionError if any test fails.
    """
    expected = self.gen_outputs(
        ds_fn, [],
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

    actual = self.gen_outputs(
        ds_fn,
        break_points,
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)

    self.match(expected, actual)

  def gen_outputs(self,
                  ds_fn,
                  break_points,
                  num_outputs,
                  ckpt_saved=False,
                  sparse_tensors=False,
                  verify_exhausted=True,
                  save_checkpoint_at_end=True):
    """Generates elements from input dataset while stopping at break points.

    Produces `num_outputs` outputs and saves the state of the iterator in the
    Saver checkpoint.

    Args:
      ds_fn: 0-argument function that returns the dataset.
      break_points: A list of integers. For each `break_point` in
        `break_points`, we produce outputs till `break_point` number of items
        have been produced and then checkpoint the state. The current graph
        and session are destroyed and a new graph and session are used to
        produce outputs till next checkpoint or till `num_outputs` elements
        have been produced. `break_point` must be <= `num_outputs`.
      num_outputs: The total number of outputs to produce from the iterator.
      ckpt_saved: Whether a checkpoint already exists.
      sparse_tensors: Whether dataset is built from SparseTensor(s).
      verify_exhausted: Whether to verify that the iterator has been exhausted
        after producing `num_outputs` elements.
      save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved at each break point but not
        at the end. Note that checkpoints overwrite each other so there is
        always only a single checkpoint available. Defaults to True.

    Returns:
      A list of `num_outputs` items.
    """
    outputs = []

    def get_ops():
      if ckpt_saved:
        saver = self._import_meta_graph()
        init_op, get_next_op = self._get_iterator_ops_from_collection(
            ds_fn, sparse_tensors=sparse_tensors)
      else:
        init_op, get_next_op, saver = self._build_graph(
            ds_fn, sparse_tensors=sparse_tensors)
      return init_op, get_next_op, saver

    for i in range(len(break_points) + 1):
      with ops.Graph().as_default() as g:
        init_op, get_next_op, saver = get_ops()
        get_next_op = remove_variants(get_next_op)
        with self.session(graph=g) as sess:
          if ckpt_saved:
            self._initialize(init_op, sess)
            self._restore(saver, sess)
          else:
            self._initialize(init_op, sess)
          start = break_points[i - 1] if i > 0 else 0
          end = break_points[i] if i < len(break_points) else num_outputs
          num_iters = end - start
          for _ in range(num_iters):
            outputs.append(sess.run(get_next_op))
          if i == len(break_points) and verify_exhausted:
            with self.assertRaises(errors.OutOfRangeError):
              sess.run(get_next_op)
          if save_checkpoint_at_end or i < len(break_points):
            self._save(sess, saver)
            ckpt_saved = True

    return outputs

  def match(self, expected, actual):
    """Matches nested structures.

    Recursively matches shape and values of `expected` and `actual`.
    Handles scalars, numpy arrays and other python sequence containers
    e.g. list, dict, as well as SparseTensorValue and RaggedTensorValue.

    Args:
      expected: Nested structure 1.
      actual: Nested structure 2.

    Raises:
      AssertionError if matching fails.
    """
    if isinstance(expected, np.ndarray):
      expected = expected.tolist()
    if isinstance(actual, np.ndarray):
      actual = actual.tolist()
    self.assertEqual(type(expected), type(actual))

    if nest.is_sequence(expected):
      self.assertEqual(len(expected), len(actual))
      if isinstance(expected, dict):
        for key1, key2 in zip(sorted(expected), sorted(actual)):
          self.assertEqual(key1, key2)
          self.match(expected[key1], actual[key2])
      else:
        for item1, item2 in zip(expected, actual):
          self.match(item1, item2)
    elif isinstance(expected, sparse_tensor.SparseTensorValue):
      self.match((expected.indices, expected.values, expected.dense_shape),
                 (actual.indices, actual.values, actual.dense_shape))
    elif isinstance(expected, ragged_tensor_value.RaggedTensorValue):
      self.match((expected.values, expected.row_splits),
                 (actual.values, actual.row_splits))
    else:
      self.assertEqual(expected, actual)

  def does_not_match(self, expected, actual):
    with self.assertRaises(AssertionError):
      self.match(expected, actual)

  def gen_break_points(self, num_outputs, num_samples=10):
    """Generates `num_samples` break points in [0, num_outputs]."""
    return np.linspace(0, num_outputs, num_samples, dtype=int)

  def _build_graph(self, ds_fn, sparse_tensors=False):
    iterator = dataset_ops.make_initializable_iterator(ds_fn())

    saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    init_op = iterator.initializer
    if sparse_tensors:
      get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    else:
      get_next = iterator.get_next()
    self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
                                         sparse_tensors)
    saver = saver_lib.Saver(allow_empty=True)
    return init_op, get_next, saver

  def _add_iterator_ops_to_collection(self,
                                      init_op,
                                      get_next,
                                      ds_fn,
                                      sparse_tensors=False):
    ops.add_to_collection("iterator_ops", init_op)
    # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
    # do not support tuples we flatten the tensors and restore the shape in
    # `_get_iterator_ops_from_collection`.
    if sparse_tensors:  # specific for deprecated `from_sparse_tensor_slices`.
      ops.add_to_collection("iterator_ops", get_next.indices)
      ops.add_to_collection("iterator_ops", get_next.values)
      ops.add_to_collection("iterator_ops", get_next.dense_shape)
      return

    get_next_list = nest.flatten(get_next)
    for i, output_class in enumerate(
        nest.flatten(self._get_output_classes(ds_fn))):
      if output_class is sparse_tensor.SparseTensor:
        ops.add_to_collection("iterator_ops", get_next_list[i].indices)
        ops.add_to_collection("iterator_ops", get_next_list[i].values)
        ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
      else:
        ops.add_to_collection("iterator_ops", get_next_list[i])

  def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
    all_ops = ops.get_collection("iterator_ops")
    if sparse_tensors:  # specific for deprecated `from_sparse_tensor_slices`.
      init_op, indices, values, dense_shape = all_ops
      return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
    get_next_list = []
    i = 1
    for output_class in nest.flatten(self._get_output_classes(ds_fn)):
      if output_class is sparse_tensor.SparseTensor:
        indices, values, dense_shape = all_ops[i:i + 3]
        i += 3
        get_next_list.append(
            sparse_tensor.SparseTensor(indices, values, dense_shape))
      else:
        get_next_list.append(all_ops[i])
        i += 1
    return all_ops[0], nest.pack_sequence_as(
        self._get_output_types(ds_fn), get_next_list)

  def _get_output_types(self, ds_fn):
    with ops.Graph().as_default():
      return dataset_ops.get_legacy_output_types(ds_fn())

  def _get_output_shapes(self, ds_fn):
    with ops.Graph().as_default():
      return dataset_ops.get_legacy_output_shapes(ds_fn())

  def _get_output_classes(self, ds_fn):
    with ops.Graph().as_default():
      return dataset_ops.get_legacy_output_classes(ds_fn())

  def _ckpt_path(self):
    return os.path.join(self.get_temp_dir(), "iterator")

  def _latest_ckpt(self):
    return checkpoint_management.latest_checkpoint(self.get_temp_dir())

  def _save(self, sess, saver):
    saver.save(sess, self._ckpt_path())

  def _restore(self, saver, sess):
    sess.run(lookup_ops.tables_initializer())
    saver.restore(sess, self._latest_ckpt())

  def _initialize(self, init_op, sess):
    sess.run(variables.global_variables_initializer())
    sess.run(lookup_ops.tables_initializer())
    sess.run(init_op)

  def _import_meta_graph(self):
    meta_file_path = self._ckpt_path() + ".meta"
    return saver_lib.import_meta_graph(meta_file_path)

  def _delete_ckpt(self):
    # Remove all checkpoint files. Use an explicit loop so the files are also
    # removed under Python 3, where map() returns a lazy iterator.
    prefix = self._ckpt_path()
    pattern = prefix + "*"
    files = gfile.Glob(pattern)
    for f in files:
      gfile.Remove(f)
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py
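To make the intended use of the base class concrete, here is a minimal sketch of how a serialization test typically subclasses it, mirroring the pattern of the tests in this directory. The class name MapDatasetSerializationExampleTest and the mapped range dataset are hypothetical examples, not an existing file in the repository.

# Illustrative sketch only: a hypothetical serialization test built on the
# DatasetSerializationTestBase class above.
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test


class MapDatasetSerializationExampleTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_ds(self):
    # 0-argument dataset factory: the base class rebuilds the dataset in fresh
    # graphs while saving and restoring the iterator at several break points.
    return dataset_ops.Dataset.range(20).map(lambda x: x * 2)

  def testCore(self):
    # 20 elements are expected before the iterator raises OutOfRangeError.
    self.run_core_tests(self._build_ds, 20)


if __name__ == "__main__":
  test.main()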
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the IgnoreErrors input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class IgnoreErrorsSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def _build_ds(self):
    return dataset_ops.Dataset.range(5).map(
        array_ops.ones).map(lambda x: array_ops.gather(x, [0])).apply(
            error_ops.ignore_errors())

  def testIgnoreErrorsCore(self):
    num_outputs = 4
    self.run_core_tests(self._build_ds, num_outputs)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/ignore_errors_serialization_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CsvDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.platform import test


class CsvDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def setUp(self):
    self._num_cols = 7
    self._num_rows = 10
    self._num_epochs = 14
    self._num_outputs = self._num_rows * self._num_epochs

    inputs = [
        ",".join(str(self._num_cols * j + i)
                 for i in range(self._num_cols))
        for j in range(self._num_rows)
    ]
    contents = "\n".join(inputs).encode("utf-8")

    self._filename = os.path.join(self.get_temp_dir(), "file.csv")
    self._compressed = os.path.join(self.get_temp_dir(),
                                    "comp.csv")  # GZip compressed

    with open(self._filename, "wb") as f:
      f.write(contents)
    with gzip.GzipFile(self._compressed, "wb") as f:
      f.write(contents)

  def ds_func(self, **kwargs):
    compression_type = kwargs.get("compression_type", None)
    if compression_type == "GZIP":
      filename = self._compressed
    elif compression_type is None:
      filename = self._filename
    else:
      raise ValueError("Invalid compression type:", compression_type)

    return readers.CsvDataset(filename, **kwargs).repeat(self._num_epochs)

  def testSerializationCore(self):
    defs = [[0]] * self._num_cols
    self.run_core_tests(
        lambda: self.ds_func(record_defaults=defs, buffer_size=2),
        self._num_outputs)


if __name__ == "__main__":
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/experimental/kernel_tests/serialization/csv_dataset_serialization_test.py
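For reference, the reader the test above exercises is exposed publicly as tf.data.experimental.CsvDataset. The sketch below shows how the same GZIP-compressed read would look against that public API; the file path is a hypothetical placeholder and the snippet only constructs the dataset.

# Minimal sketch (assumed path), public API counterpart of readers.CsvDataset.
import tensorflow as tf

record_defaults = [[0]] * 7  # seven int32 columns, defaulting to 0
dataset = tf.data.experimental.CsvDataset(
    "/tmp/comp.csv",               # hypothetical path to a gzipped CSV file
    record_defaults=record_defaults,
    compression_type="GZIP",
    buffer_size=2)                 # small read buffer, as in the test above
# Each element of `dataset` is a tuple of seven scalar int32 tensors.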