python_code
stringlengths
0
679k
repo_name
stringlengths
9
41
file_path
stringlengths
6
149
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.TFRecordDataset`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gzip import os import zlib from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import readers from tensorflow.python.framework import test_util from tensorflow.python.lib.io import python_io from tensorflow.python.platform import test from tensorflow.python.util import compat @test_util.run_all_in_graph_and_eager_modes class TFRecordDatasetTest(test_base.DatasetTestBase): def setUp(self): super(TFRecordDatasetTest, self).setUp() self._num_files = 2 self._num_records = 7 self.test_filenames = self._createFiles() def dataset_fn(self, filenames, compression_type="", num_epochs=1, batch_size=None): repeat_dataset = readers.TFRecordDataset( filenames, compression_type).repeat(num_epochs) if batch_size: return repeat_dataset.batch(batch_size) return repeat_dataset def _record(self, f, r): return compat.as_bytes("Record %d of file %d" % (r, f)) def _createFiles(self): filenames = [] for i in range(self._num_files): fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i) filenames.append(fn) writer = python_io.TFRecordWriter(fn) for j in range(self._num_records): writer.write(self._record(i, j)) writer.close() return filenames def testReadOneEpoch(self): # Basic test: read from file 0. dataset = self.dataset_fn(self.test_filenames[0]) self.assertDatasetProduces( dataset, expected_output=[self._record(0, i) for i in range(self._num_records)]) # Basic test: read from file 1. dataset = self.dataset_fn(self.test_filenames[1]) self.assertDatasetProduces( dataset, expected_output=[self._record(1, i) for i in range(self._num_records)]) # Basic test: read from both files. 
dataset = self.dataset_fn(self.test_filenames) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) self.assertDatasetProduces(dataset, expected_output=expected_output) def testReadTenEpochs(self): dataset = self.dataset_fn(self.test_filenames, num_epochs=10) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) self.assertDatasetProduces(dataset, expected_output=expected_output * 10) def testReadTenEpochsOfBatches(self): dataset = self.dataset_fn( self.test_filenames, num_epochs=10, batch_size=self._num_records) expected_output = [] for j in range(self._num_files): expected_output.append( [self._record(j, i) for i in range(self._num_records)]) self.assertDatasetProduces(dataset, expected_output=expected_output * 10) def testReadZlibFiles(self): zlib_files = [] for i, fn in enumerate(self.test_filenames): with open(fn, "rb") as f: cdata = zlib.compress(f.read()) zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i) with open(zfn, "wb") as f: f.write(cdata) zlib_files.append(zfn) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) dataset = self.dataset_fn(zlib_files, compression_type="ZLIB") self.assertDatasetProduces(dataset, expected_output=expected_output) def testReadGzipFiles(self): gzip_files = [] for i, fn in enumerate(self.test_filenames): with open(fn, "rb") as f: gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i) with gzip.GzipFile(gzfn, "wb") as gzf: gzf.write(f.read()) gzip_files.append(gzfn) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) dataset = self.dataset_fn(gzip_files, compression_type="GZIP") self.assertDatasetProduces(dataset, expected_output=expected_output) def testReadWithBuffer(self): one_mebibyte = 2**20 dataset = readers.TFRecordDataset( self.test_filenames, buffer_size=one_mebibyte) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) self.assertDatasetProduces(dataset, expected_output=expected_output) def testReadFromDatasetOfFiles(self): files = dataset_ops.Dataset.from_tensor_slices(self.test_filenames) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) dataset = readers.TFRecordDataset(files) self.assertDatasetProduces(dataset, expected_output=expected_output) def testReadTenEpochsFromDatasetOfFilesInParallel(self): files = dataset_ops.Dataset.from_tensor_slices( self.test_filenames).repeat(10) expected_output = [] for j in range(self._num_files): expected_output.extend( [self._record(j, i) for i in range(self._num_records)]) dataset = readers.TFRecordDataset(files, num_parallel_reads=4) self.assertDatasetProduces( dataset, expected_output=expected_output * 10, assert_items_equal=True) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/tf_record_dataset_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset.skip()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class SkipTest(test_base.DatasetTestBase): def testSkipTensorDataset(self): components = (np.arange(10),) def do_test(count): dataset = dataset_ops.Dataset.from_tensor_slices(components).skip(count) self.assertEqual( [c.shape[1:] for c in components], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)]) start_range = min(count, 10) if count != -1 else 10 self.assertDatasetProduces( dataset, [tuple(components[0][i:i + 1]) for i in range(start_range, 10)]) # Skip fewer than input size, we should skip # the first 4 elements and then read the rest. do_test(4) # Skip more than input size: get nothing. do_test(25) # Skip exactly input size. do_test(10) # Set -1 for 'count': skip the entire dataset. do_test(-1) # Skip nothing do_test(0) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/skip_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.data.Dataset.from_generator().""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import threading import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import test_util from tensorflow.python.ops import script_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class DatasetConstructorTest(test_base.DatasetTestBase): def _testFromGenerator(self, generator, elem_sequence, num_repeats, output_types=None): if output_types is None: output_types = dtypes.int64 dataset = dataset_ops.Dataset.from_generator( generator, output_types=output_types).repeat(num_repeats).prefetch(5) self.assertDatasetProduces( dataset, elem_sequence * num_repeats, requires_initialization=True, num_test_iterations=2) def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats): dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64).repeat(num_repeats).prefetch(5) self.assertDatasetProduces( dataset, elem_sequence * num_repeats, num_test_iterations=2) def testFromGeneratorUsingFunction(self): def generator(): for i in range(1, 100): yield [i] * i elem_sequence = list(generator()) self._testFromGenerator(generator, elem_sequence, 1) self._testFromGenerator(generator, elem_sequence, 5) self._testFromGeneratorOneShot(generator, elem_sequence, 1) self._testFromGeneratorOneShot(generator, elem_sequence, 5) def testFromGeneratorUsingList(self): generator = lambda: [[i] * i for i in range(1, 100)] elem_sequence = list(generator()) self._testFromGenerator(generator, elem_sequence, 1) self._testFromGenerator(generator, elem_sequence, 5) def testFromGeneratorUsingNdarray(self): generator = lambda: np.arange(100, dtype=np.int64) elem_sequence = list(generator()) self._testFromGenerator(generator, elem_sequence, 1, output_types=np.int64) self._testFromGenerator(generator, elem_sequence, 5, output_types=np.int64) def testFromGeneratorUsingGeneratorExpression(self): # NOTE(mrry): Generator *expressions* are not repeatable (or in # general reusable), because they eagerly evaluate the `for` # expression as `iter(range(1, 100))` and discard the means of # reconstructing `range(1, 100)`. Wrapping the generator # expression in a `lambda` makes it repeatable. 
generator = lambda: ([i] * i for i in range(1, 100)) elem_sequence = list(generator()) self._testFromGenerator(generator, elem_sequence, 1) self._testFromGenerator(generator, elem_sequence, 5) def testFromMultipleConcurrentGenerators(self): num_inner_repeats = 5 num_outer_repeats = 100 def generator(): for i in range(1, 10): yield ([i] * i, [i, i ** 2, i ** 3]) input_list = list(generator()) # The interleave transformation is essentially a flat map that # draws from multiple input datasets concurrently (in a cyclic # fashion). By placing `Datsaet.from_generator()` inside an # interleave, we test its behavior when multiple iterators are # active at the same time; by additionally prefetching inside the # interleave, we create the possibility of parallel (modulo GIL) # invocations to several iterators created by the same dataset. def interleave_fn(_): return (dataset_ops.Dataset.from_generator( generator, output_types=(dtypes.int64, dtypes.int64), output_shapes=([None], [3])) .repeat(num_inner_repeats).prefetch(5)) dataset = dataset_ops.Dataset.range(num_outer_repeats).interleave( interleave_fn, cycle_length=10, block_length=len(input_list)) get_next = self.getNext(dataset) for _ in range(num_inner_repeats * num_outer_repeats): for elem in input_list: val0, val1 = self.evaluate(get_next()) self.assertAllEqual(elem[0], val0) self.assertAllEqual(elem[1], val1) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # TODO(b/67868766): Reenable this when the source of flakiness is discovered. def _testFromGeneratorsRunningInParallel(self): num_parallel_iterators = 3 # Define shared state that multiple iterator instances will access to # demonstrate their concurrent activity. lock = threading.Lock() condition = threading.Condition(lock) next_ticket = [0] # GUARDED_BY(lock) def generator(): # NOTE(mrry): We yield one element before the barrier, because # the current implementation of `Dataset.interleave()` must # fetch one element from each incoming dataset to start the # prefetching. yield 0 # Define a barrier that `num_parallel_iterators` iterators must enter # before any can proceed. Demonstrates that multiple iterators may be # active at the same time. condition.acquire() ticket = next_ticket[0] next_ticket[0] += 1 if ticket == num_parallel_iterators - 1: # The last iterator to join the barrier notifies the others. condition.notify_all() else: # Wait until the last iterator enters the barrier. while next_ticket[0] < num_parallel_iterators: condition.wait() condition.release() yield 1 # As in `testFromMultipleConcurrentGenerators()`, we use a combination of # `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple # iterators to be active concurrently. 
def interleave_fn(_): return dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2) dataset = dataset_ops.Dataset.range(num_parallel_iterators).interleave( interleave_fn, cycle_length=num_parallel_iterators, block_length=1) get_next = self.getNext(dataset) for elem in [0, 1]: for _ in range(num_parallel_iterators): self.assertAllEqual(elem, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testFromGeneratorImplicitConversion(self): def generator(): yield [1] yield [2] yield [3] for dtype in [dtypes.int8, dtypes.int32, dtypes.int64]: dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtype, output_shapes=[1]) get_next = self.getNext(dataset) for expected in [[1], [2], [3]]: next_val = self.evaluate(get_next()) self.assertEqual(dtype.as_numpy_dtype, next_val.dtype) self.assertAllEqual(expected, next_val) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testFromGeneratorString(self): def generator(): yield "foo" yield b"bar" yield u"baz" dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.string, output_shapes=[]) self.assertDatasetProduces( dataset, expected_output=[b"foo", b"bar", b"baz"]) def testFromGeneratorTypeError(self): def generator(): yield np.array([1, 2, 3], dtype=np.int64) yield np.array([4, 5, 6], dtype=np.int64) yield "ERROR" yield np.array([7, 8, 9], dtype=np.int64) dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64, output_shapes=[3]) get_next = self.getNext(dataset) self.assertAllEqual([1, 2, 3], self.evaluate(get_next())) self.assertAllEqual([4, 5, 6], self.evaluate(get_next())) with self.assertRaisesOpError("The expected type was int64"): self.evaluate(get_next()) self.assertAllEqual([7, 8, 9], self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testFromGeneratorShapeError(self): def generator(): yield np.array([1, 2, 3], dtype=np.int64) yield np.array([4, 5, 6], dtype=np.int64) yield np.array([7, 8, 9, 10], dtype=np.int64) yield np.array([11, 12, 13], dtype=np.int64) dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64, output_shapes=[3]) get_next = self.getNext(dataset) self.assertAllEqual([1, 2, 3], self.evaluate(get_next())) self.assertAllEqual([4, 5, 6], self.evaluate(get_next())) with self.assertRaisesOpError(r"element of shape \(3,\) was expected"): self.evaluate(get_next()) self.assertAllEqual([11, 12, 13], self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testFromGeneratorStructureError(self): def generator(): yield 1, 2 yield 3, 4 yield 5 yield 6, 7, 8 yield 9, 10 dataset = dataset_ops.Dataset.from_generator( generator, output_types=(dtypes.int64, dtypes.int64)) get_next = self.getNext(dataset) self.assertEqual((1, 2), self.evaluate(get_next())) self.assertEqual((3, 4), self.evaluate(get_next())) with self.assertRaisesOpError( r"The expected structure was \(tf\.int64, tf\.int64\)"): self.evaluate(get_next()) with self.assertRaisesOpError( r"The expected structure was \(tf\.int64, tf\.int64\)"): self.evaluate(get_next()) self.assertEqual((9, 10), self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def testFromGeneratorHeterogeneous(self): def generator(): yield 1 yield [2, 3] dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64) 
self.assertDatasetProduces(dataset, expected_output=[1, [2, 3]]) def testFromGeneratorStopShort(self): def generator(): yield 0 yield 1 yield 2 dataset = dataset_ops.Dataset.from_generator( generator, output_types=dtypes.int64) get_next = self.getNext(dataset) self.assertAllEqual(0, self.evaluate(get_next())) self.assertAllEqual(1, self.evaluate(get_next())) def testFromGeneratorDestructorCalled(self): # Use an `Event` to signal that the generator has been deleted. event = threading.Event() class GeneratorWrapper(object): def __iter__(self): return self def next(self): return self.__next__() def __next__(self): return 42 def __del__(self): event.set() dataset = dataset_ops.Dataset.from_generator( GeneratorWrapper, output_types=dtypes.int64).take(2) get_next = self.getNext(dataset) self.assertAllEqual(42, self.evaluate(get_next())) self.assertAllEqual(42, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) # Test that `GeneratorWrapper` object is destroyed when the # iterator terminates (and the generator iterator is deleted). self.assertTrue(event.is_set()) def testFromGeneratorWithArgs(self): def flat_map_fn(elem): def generator_with_arg(n): for _ in range(n): yield np.array(n, dtype=np.int64) return dataset_ops.Dataset.from_generator( generator_with_arg, output_types=dtypes.int64, output_shapes=(), args=(elem,)) dataset = dataset_ops.Dataset.range(5).flat_map(flat_map_fn) self.assertDatasetProduces( dataset, expected_output=[1, 2, 2, 3, 3, 3, 4, 4, 4, 4]) def testFromGeneratorWithTwoArgs(self): def flat_map_fn(elem, message): def generator_with_arg(n, msg): for i in range(n): yield i, msg return dataset_ops.Dataset.from_generator( generator_with_arg, output_types=(dtypes.int64, dtypes.string), output_shapes=((), ()), args=(elem, message)) dataset = dataset_ops.Dataset.zip( (dataset_ops.Dataset.range(5), dataset_ops.Dataset.from_tensors("Hi!").repeat(None) )).flat_map(flat_map_fn) self.assertDatasetProduces( dataset, expected_output=[(0, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"), (2, b"Hi!"), (0, b"Hi!"), (1, b"Hi!"), (2, b"Hi!"), (3, b"Hi!")]) def testGeneratorDatasetFinalizeFunctionCalled(self): # NOTE(mrry): This test tests the internal `_GeneratorDataset`, # which affords more control over what the finalize function can do than # the `Dataset.from_generator()` wrapper. # Use an `Event` to signal that the generator has been deleted. event = threading.Event() def finalize_fn(_): def finalize_py_func(): event.set() return 0 return script_ops.py_func(finalize_py_func, [], [dtypes.int64], stateful=True) dummy = constant_op.constant(37) dataset = dataset_ops._GeneratorDataset(dummy, lambda x: x, lambda x: x, finalize_fn).take(2) get_next = self.getNext(dataset) self.assertAllEqual(37, self.evaluate(get_next())) self.assertAllEqual(37, self.evaluate(get_next())) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) self.assertTrue(event.is_set()) def testSharedName(self): def generator(): for _ in range(10): yield [20] dataset = dataset_ops.Dataset.from_generator( generator, output_types=(dtypes.int64)) get_next = self.getNext( dataset, requires_initialization=True, shared_name="shared_dataset") self.assertAllEqual([20], self.evaluate(get_next())) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/from_generator_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test utilities for tf.data functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from tensorflow.python import tf2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.data.util import structure from tensorflow.python.eager import context from tensorflow.python.framework import combinations from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test def default_test_combinations(): """Returns the default test combinations for tf.data tests.""" return combinations.combine(tf_api_version=[1, 2], mode=["eager", "graph"]) class DatasetTestBase(test.TestCase): """Base class for dataset tests.""" @classmethod def setUpClass(cls): if tf2.enabled(): dataset_ops.Dataset = dataset_ops.DatasetV2 else: dataset_ops.Dataset = dataset_ops.DatasetV1 def assert_op_cancelled(self, op): with self.assertRaises(errors.CancelledError): self.evaluate(op) def assertValuesEqual(self, expected, actual): """Asserts that two values are equal.""" if sparse_tensor.is_sparse(expected): self.assertAllEqual(expected.indices, actual.indices) self.assertAllEqual(expected.values, actual.values) self.assertAllEqual(expected.dense_shape, actual.dense_shape) else: self.assertAllEqual(expected, actual) def getNext(self, dataset, requires_initialization=False, shared_name=None): """Returns a callable that returns the next element of the dataset. Example use: ```python # In both graph and eager modes dataset = ... get_next = self.getNext(dataset) result = self.evaluate(get_next()) ``` Args: dataset: A dataset whose elements will be returned. requires_initialization: Indicates that when the test is executed in graph mode, it should use an initializable iterator to iterate through the dataset (e.g. when it contains stateful nodes). Defaults to False. shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: A callable that returns the next element of `dataset`. Any `TensorArray` objects `dataset` outputs are stacked. """ def ta_wrapper(gn): def _wrapper(): r = gn() if isinstance(r, tensor_array_ops.TensorArray): return r.stack() else: return r return _wrapper # Create an anonymous iterator if we are in eager-mode or are graph inside # of a tf.function. 
building_function = ops.get_default_graph()._building_function # pylint: disable=protected-access if context.executing_eagerly() or building_function: iterator = iter(dataset) return ta_wrapper(iterator._next_internal) # pylint: disable=protected-access else: if requires_initialization: iterator = dataset_ops.make_initializable_iterator(dataset, shared_name) self.evaluate(iterator.initializer) else: iterator = dataset_ops.make_one_shot_iterator(dataset) get_next = iterator.get_next() return ta_wrapper(lambda: get_next) def _compareOutputToExpected(self, result_values, expected_values, assert_items_equal): if assert_items_equal: # TODO(shivaniagrawal): add support for nested elements containing sparse # tensors when needed. self.assertItemsEqual(result_values, expected_values) return for i in range(len(result_values)): nest.assert_same_structure(result_values[i], expected_values[i]) for result_value, expected_value in zip( nest.flatten(result_values[i]), nest.flatten(expected_values[i])): self.assertValuesEqual(expected_value, result_value) def assertDatasetProduces(self, dataset, expected_output=None, expected_shapes=None, expected_error=None, requires_initialization=False, num_test_iterations=1, assert_items_equal=False, expected_error_iter=1): """Asserts that a dataset produces the expected output / error. Args: dataset: A dataset to check for the expected output / error. expected_output: A list of elements that the dataset is expected to produce. expected_shapes: A list of TensorShapes which is expected to match output_shapes of dataset. expected_error: A tuple `(type, predicate)` identifying the expected error `dataset` should raise. The `type` should match the expected exception type, while `predicate` should either be 1) a unary function that inputs the raised exception and returns a boolean indicator of success or 2) a regular expression that is expected to match the error message partially. requires_initialization: Indicates that when the test is executed in graph mode, it should use an initializable iterator to iterate through the dataset (e.g. when it contains stateful nodes). Defaults to False. num_test_iterations: Number of times `dataset` will be iterated. Defaults to 2. assert_items_equal: Tests expected_output has (only) the same elements regardless of order. expected_error_iter: How many times to iterate before expecting an error, if an error is expected. """ self.assertTrue( expected_error is not None or expected_output is not None, "Exactly one of expected_output or expected error should be provided.") if expected_error: self.assertTrue( expected_output is None, "Exactly one of expected_output or expected error should be provided." 
) with self.assertRaisesWithPredicateMatch(expected_error[0], expected_error[1]): get_next = self.getNext( dataset, requires_initialization=requires_initialization) for _ in range(expected_error_iter): self.evaluate(get_next()) return if expected_shapes: self.assertEqual(expected_shapes, dataset_ops.get_legacy_output_shapes(dataset)) self.assertGreater(num_test_iterations, 0) for _ in range(num_test_iterations): get_next = self.getNext( dataset, requires_initialization=requires_initialization) result = [] for _ in range(len(expected_output)): result.append(self.evaluate(get_next())) self._compareOutputToExpected(result, expected_output, assert_items_equal) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) def assertDatasetsEqual(self, dataset1, dataset2): """Checks that datasets are equal. Supports both graph and eager mode.""" self.assertTrue( structure.are_compatible( dataset_ops.get_structure(dataset1), dataset_ops.get_structure(dataset2))) flattened_types = nest.flatten( dataset_ops.get_legacy_output_types(dataset1)) next1 = self.getNext(dataset1) next2 = self.getNext(dataset2) while True: try: op1 = self.evaluate(next1()) except errors.OutOfRangeError: with self.assertRaises(errors.OutOfRangeError): self.evaluate(next2()) break op2 = self.evaluate(next2()) op1 = nest.flatten(op1) op2 = nest.flatten(op2) assert len(op1) == len(op2) for i in range(len(op1)): if sparse_tensor.is_sparse(op1[i]) or ragged_tensor.is_ragged(op1[i]): self.assertValuesEqual(op1[i], op2[i]) elif flattened_types[i] == dtypes.string: self.assertAllEqual(op1[i], op2[i]) else: self.assertAllClose(op1[i], op2[i]) def assertDatasetsRaiseSameError(self, dataset1, dataset2, exception_class, replacements=None): """Checks that datasets raise the same error on the first get_next call.""" if replacements is None: replacements = [] next1 = self.getNext(dataset1) next2 = self.getNext(dataset2) try: self.evaluate(next1()) raise ValueError( "Expected dataset to raise an error of type %s, but it did not." % repr(exception_class)) except exception_class as e: expected_message = e.message for old, new, count in replacements: expected_message = expected_message.replace(old, new, count) # Check that the first segment of the error messages are the same. with self.assertRaisesRegexp(exception_class, re.escape(expected_message)): self.evaluate(next2()) def structuredDataset(self, dataset_structure, shape=None, dtype=dtypes.int64): """Returns a singleton dataset with the given structure.""" if shape is None: shape = [] if dataset_structure is None: return dataset_ops.Dataset.from_tensors( array_ops.zeros(shape, dtype=dtype)) else: return dataset_ops.Dataset.zip( tuple([ self.structuredDataset(substructure, shape, dtype) for substructure in dataset_structure ])) def structuredElement(self, element_structure, shape=None, dtype=dtypes.int64): """Returns an element with the given structure.""" if shape is None: shape = [] if element_structure is None: return array_ops.zeros(shape, dtype=dtype) else: return tuple([ self.structuredElement(substructure, shape, dtype) for substructure in element_structure ])
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/test_base.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.MultiDeviceIterator`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.data.experimental.ops import optimization from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import multi_device_iterator_ops from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class MultiDeviceIteratorTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.parameters(0, 1, 42,) @test_util.run_v1_only("b/121264236") def testInitOnly(self, num_inits): dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): for _ in range(num_inits): self.evaluate(multi_device_iterator.initializer) @test_util.run_v1_only("b/121264236") def testBasic(self): dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @test_util.run_v1_only("b/121264236") def testOneOnSameDevice(self): with ops.device("/cpu:0"): dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:0", "/cpu:1"]) config = config_pb2.ConfigProto(device_count={"CPU": 2}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() 
self.evaluate(elem_on_1) self.evaluate(elem_on_2) @test_util.run_v1_only("b/121264236") def testRepeatDevices(self): with ops.device("/cpu:0"): dataset = dataset_ops.Dataset.range(20) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2", "/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 20, 4): elements = multi_device_iterator.get_next() elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) self.assertEqual(i + 2, self.evaluate(elem_on_3)) self.assertEqual(i + 3, self.evaluate(elem_on_4)) with self.assertRaises(errors.OutOfRangeError): elements = multi_device_iterator.get_next() elem_on_1, elem_on_2, elem_on_3, elem_on_4 = elements self.evaluate(elem_on_1) self.evaluate(elem_on_2) self.evaluate(elem_on_3) self.evaluate(elem_on_4) @test_util.run_v1_only("b/121264236") def testNotFullyDivisible(self): dataset = dataset_ops.Dataset.range(9) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 8, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) elem_on_1 = multi_device_iterator.get_next("/cpu:1") self.assertEqual(8, self.evaluate(elem_on_1)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @test_util.run_v1_only("b/121264236") def testGetNextAsOptional(self): if context.executing_eagerly(): return dataset = dataset_ops.Dataset.range(9) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional() elem_on_1_has_value_t = elem_on_1.has_value() elem_on_1_t = elem_on_1.get_value() elem_on_2_has_value_t = elem_on_2.has_value() elem_on_2_t = elem_on_2.get_value() config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config) as sess: self.evaluate(multi_device_iterator.initializer) for i in range(0, 8, 2): elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(i, elem_on_1_value) elem_on_2_has_value, elem_on_2_value = sess.run( [elem_on_2_has_value_t, elem_on_2_t]) self.assertTrue(elem_on_2_has_value) self.assertEqual(i + 1, elem_on_2_value) elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(8, elem_on_1_value) self.assertFalse(self.evaluate(elem_on_1_has_value_t)) self.assertFalse(self.evaluate(elem_on_2_has_value_t)) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_1_t) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_2_t) @test_util.run_v1_only("b/121264236") def testUneven(self): dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], max_buffer_size=4) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): 
self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1 = multi_device_iterator.get_next("/cpu:1") self.assertEqual(i, self.evaluate(elem_on_1)) for i in range(0, 10, 2): elem_on_2 = multi_device_iterator.get_next("/cpu:2") self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @test_util.run_v1_only("b/121264236") def testMultipleInitializationsGraph(self): if context.executing_eagerly(): return with ops.device("/cpu:0"): epoch = array_ops.placeholder(dtypes.int64, shape=[]) dataset1 = dataset_ops.Dataset.from_tensors(epoch).repeat(1000) dataset2 = dataset_ops.Dataset.range(1000) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4) elem_on_1, elem_on_2 = multi_device_iterator.get_next() init_op = multi_device_iterator.initializer config = config_pb2.ConfigProto(device_count={"CPU": 3}) pool = config.session_inter_op_thread_pool.add() pool.num_threads = 2 with session.Session(config=config) as sess: for i in range(1000): sess.run(init_op, feed_dict={epoch: i}) self.assertEqual([(i, 0), (i, 1)], self.evaluate([elem_on_1, elem_on_2])) @test_util.run_v1_only("b/121264236") def testMultipleInitializationsEager(self): if not context.executing_eagerly(): return with ops.device("/cpu:0"): dataset1 = dataset_ops.Dataset.range(1000) dataset2 = dataset_ops.Dataset.range(1000) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) for _ in range(5): multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"], prefetch_buffer_size=4) elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual([(0, 0), (1, 1)], self.evaluate([elem_on_1, elem_on_2])) @test_util.run_v1_only("b/121264236") def testBasicGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"]) config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) @test_util.run_v1_only("b/121264236") def testUnevenGpu(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(10) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"], max_buffer_size=4) config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1 = multi_device_iterator.get_next("/cpu:1") self.assertEqual(i, self.evaluate(elem_on_1)) for i in range(0, 10, 2): elem_on_2 = multi_device_iterator.get_next("/gpu:0") self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) 
@test_util.run_v1_only("b/121264236") def testGetNextAsOptionalGpu(self): if not test_util.is_gpu_available() or context.executing_eagerly(): self.skipTest("No GPU available") dataset = dataset_ops.Dataset.range(9) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/gpu:0"]) elem_on_1, elem_on_2 = multi_device_iterator.get_next_as_optional() elem_on_1_has_value_t = elem_on_1.has_value() elem_on_1_t = elem_on_1.get_value() elem_on_2_has_value_t = elem_on_2.has_value() elem_on_2_t = elem_on_2.get_value() config = config_pb2.ConfigProto(device_count={"CPU": 2, "GPU": 1}) with self.test_session(config=config) as sess: self.evaluate(multi_device_iterator.initializer) for i in range(0, 8, 2): elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(i, elem_on_1_value) elem_on_2_has_value, elem_on_2_value = sess.run( [elem_on_2_has_value_t, elem_on_2_t]) self.assertTrue(elem_on_2_has_value) self.assertEqual(i + 1, elem_on_2_value) elem_on_1_has_value, elem_on_1_value = sess.run( [elem_on_1_has_value_t, elem_on_1_t]) self.assertTrue(elem_on_1_has_value) self.assertEqual(8, elem_on_1_value) self.assertFalse(self.evaluate(elem_on_1_has_value_t)) self.assertFalse(self.evaluate(elem_on_2_has_value_t)) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_1_t) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(elem_on_2_t) @test_util.run_v1_only("b/121264236") def testOptimization(self): dataset = dataset_ops.Dataset.range(10) dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"])) dataset = dataset.skip(0) # this should be optimized away dataset = dataset.cache() options = dataset_ops.Options() options.experimental_optimization.noop_elimination = True dataset = dataset.with_options(options) multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( dataset, ["/cpu:1", "/cpu:2"]) config = config_pb2.ConfigProto(device_count={"CPU": 3}) with self.test_session(config=config): self.evaluate(multi_device_iterator.initializer) for i in range(0, 10, 2): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.assertEqual(i, self.evaluate(elem_on_1)) self.assertEqual(i + 1, self.evaluate(elem_on_2)) with self.assertRaises(errors.OutOfRangeError): elem_on_1, elem_on_2 = multi_device_iterator.get_next() self.evaluate(elem_on_1) self.evaluate(elem_on_2) class MultiDeviceIteratorV2Test(test_base.DatasetTestBase): @test_util.run_v2_only def testBasic(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with ops.device("/cpu:0"): dataset = dataset_ops.Dataset.range(1000) mdi = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset, ["/cpu:0", "/gpu:0"]) for i, el in enumerate(mdi): self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()]) @test_util.run_v2_only def testBasicFunction(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") queue = data_flow_ops.FIFOQueue(10, dtypes.int64) @def_function.function def fn(): with ops.device("/cpu:0"): dataset = dataset_ops.Dataset.range(10) iterator = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset, ["/cpu:0", "/gpu:0"]) for _ in range(5): el0, el1 = next(iterator) queue.enqueue(el0) queue.enqueue(el1) fn() for i in range(10): self.assertEqual(queue.dequeue().numpy(), i) @test_util.run_v2_only def testFunctionError(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") # In this test we 
verify that a function that raises an error ends up # properly deallocating the iterator resource. queue = data_flow_ops.FIFOQueue(10, dtypes.int64) queue.enqueue(0) def init_fn(n): return n def next_fn(_): ds = dataset_ops.Dataset.range(0) return next(iter(ds)) def finalize_fn(n): queue.enqueue(0) return n @def_function.function def fn(): dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn) iterator = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset, ["/cpu:0", "/gpu:0"]) next(iterator) with self.assertRaises(errors.OutOfRangeError): fn() self.assertEqual(queue.size().numpy(), 2) @test_util.run_v2_only def testMultipleInitializations(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with ops.device("/cpu:0"): dataset = dataset_ops.Dataset.range(1000) for _ in range(5): multi_device_iterator = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset, ["/cpu:0", "/gpu:0"]) for i, el in enumerate(multi_device_iterator): self.assertEqual([i * 2, i * 2 + 1], [el[0].numpy(), el[1].numpy()]) @test_util.run_v2_only def testLimitedRetracing(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") trace_count = [0] @def_function.function def f(iterator): trace_count[0] += 1 counter = np.int64(0) for _ in range(5): elem = next(iterator) counter += elem[0] counter += elem[1] return counter dataset = dataset_ops.Dataset.range(10) dataset2 = dataset_ops.Dataset.range(20) for _ in range(10): multi_device_iterator = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset, ["/cpu:0", "/gpu:0"]) self.assertEqual(self.evaluate(f(multi_device_iterator)), 45) multi_device_iterator2 = multi_device_iterator_ops.MultiDeviceIteratorV2( dataset2, ["/cpu:0", "/gpu:0"]) self.assertEqual(self.evaluate(f(multi_device_iterator2)), 45) self.assertEqual(trace_count[0], 1) if __name__ == "__main__": ops.enable_eager_execution( config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1})) test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/multi_device_iterator_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset.repeat()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import test_util from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class RepeatTest(test_base.DatasetTestBase): def testRepeatTensorDataset(self): """Test a dataset that repeats its input multiple times.""" components = (np.array(1), np.array([1, 2, 3]), np.array(37.0)) # This placeholder can be fed when dataset-definition subgraph # runs (i.e. `init_op` below) to configure the number of # repetitions used in a particular iterator. def do_test(count): dataset = dataset_ops.Dataset.from_tensors(components).repeat(count) self.assertEqual( [c.shape for c in components], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)]) self.assertDatasetProduces(dataset, [components] * count) # Test a finite repetition. do_test(3) # test a different finite repetition. do_test(7) # Test an empty repetition. do_test(0) # Test an infinite repetition. # NOTE(mrry): There's not a good way to test that the sequence # actually is infinite. dataset = dataset_ops.Dataset.from_tensors(components).repeat(-1) self.assertEqual( [c.shape for c in components], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)]) get_next = self.getNext(dataset) for _ in range(17): results = self.evaluate(get_next()) for component, result_component in zip(components, results): self.assertAllEqual(component, result_component) def testRepeatRepeatTensorDataset(self): """Test the composition of repeat datasets.""" components = (np.array(1), np.array([1, 2, 3]), np.array(37.0)) inner_count, outer_count = 7, 14 dataset = dataset_ops.Dataset.from_tensors(components).repeat( inner_count).repeat(outer_count) self.assertEqual( [c.shape for c in components], [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)]) self.assertDatasetProduces(dataset, [components] * (inner_count * outer_count)) def testRepeatEmptyDataset(self): """Test that repeating an empty dataset does not hang.""" dataset = dataset_ops.Dataset.from_tensors(0).repeat(10).skip(10).repeat(-1) self.assertDatasetProduces(dataset, []) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/repeat_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset.window()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.eager import context from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test @test_util.run_all_in_graph_and_eager_modes class WindowTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ("1", 20, 14, 7, 1), ("2", 20, 17, 9, 1), ("3", 20, 14, 14, 1), ("4", 20, 10, 14, 1), ("5", 20, 14, 19, 1), ("6", 20, 4, 1, 2), ("7", 20, 2, 1, 6), ("8", 20, 4, 7, 2), ("9", 20, 2, 7, 6), ("10", 1, 10, 4, 1), ("11", 0, 10, 4, 1), ("12", 20, 14, 7, 1, False), ("13", 20, 17, 9, 1, False), ("14", 20, 14, 14, 1, False), ("15", 20, 10, 14, 1, False), ("16", 20, 14, 19, 1, False), ("17", 20, 4, 1, 2, False), ("18", 20, 2, 1, 6, False), ("19", 20, 4, 7, 2, False), ("20", 20, 2, 7, 6, False), ("21", 1, 10, 4, 1, False), ("22", 0, 10, 4, 1, False), ) def testWindowDataset(self, count, size, shift, stride, drop_remainder=True): """Tests a dataset that slides a window its input elements.""" components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) def _flat_map_fn(x, y, z): return dataset_ops.Dataset.zip((x.batch(batch_size=size), y.batch(batch_size=size), z.batch(batch_size=size))) dataset = dataset_ops.Dataset.from_tensor_slices(components).map( _map_fn).repeat(count).window( size=size, shift=shift, stride=stride, drop_remainder=drop_remainder).flat_map(_flat_map_fn) get_next = self.getNext(dataset) self.assertEqual([[None] + list(c.shape[1:]) for c in components], [ts.as_list() for ts in nest.flatten( dataset_ops.get_legacy_output_shapes(dataset))]) num_full_batches = max(0, (count * 7 - ((size - 1) * stride + 1)) // shift + 1) for i in range(num_full_batches): result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range(size): self.assertAllEqual(component[(i * shift + j * stride) % 7]**2, result_component[j]) if not drop_remainder: num_partial_batches = (count * 7) // shift + ( (count * 7) % shift > 0) - num_full_batches for i in range(num_partial_batches): result = self.evaluate(get_next()) for component, result_component in zip(components, result): remaining = (count 
* 7) - ((num_full_batches + i) * shift) num_elements = remaining // stride + ((remaining % stride) > 0) for j in range(num_elements): self.assertAllEqual( component[((num_full_batches + i) * shift + j * stride) % 7]**2, result_component[j]) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) with self.assertRaises(errors.OutOfRangeError): self.evaluate(get_next()) @parameterized.named_parameters( ("1", 14, 0, 3, 1), ("2", 14, 3, 0, 1), ("3", 14, 3, 3, 0), ) def testWindowDatasetInvalid(self, count, size, shift, stride): with self.assertRaises(errors.InvalidArgumentError): ds = dataset_ops.Dataset.range(10).map(lambda x: x).repeat(count).window( size=size, shift=shift, stride=stride).flat_map(lambda x: x.batch(batch_size=size)) self.evaluate(ds._variant_tensor) def testWindowSparse(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) dataset = dataset_ops.Dataset.range(10).map(_sparse).window( size=5, shift=3, drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5)) num_batches = (10 - 5) // 3 + 1 expected_output = [ sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]], values=[i * 3, i * 3 + 1, i * 3 + 2, i * 3 + 3, i * 3 + 4], dense_shape=[5, 1]) for i in range(num_batches) ] self.assertDatasetProduces(dataset, expected_output=expected_output) def testWindowSparseWithDifferentDenseShapes(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=array_ops.expand_dims( math_ops.range(i, dtype=dtypes.int64), 1), values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i), dense_shape=[i]) dataset = dataset_ops.Dataset.range(10).map(_sparse).window( size=5, shift=3, drop_remainder=True).flat_map(lambda x: x.batch(batch_size=5)) expected_output = [] num_batches = (10 - 5) // 3 + 1 for i in range(num_batches): expected_indices = [] expected_values = [] for j in range(5): for k in range(i * 3 + j): expected_indices.append([j, k]) expected_values.append(i * 3 + j) expected_output.append( sparse_tensor.SparseTensorValue( indices=expected_indices, values=expected_values, dense_shape=[5, i * 3 + 5 - 1])) self.assertDatasetProduces(dataset, expected_output=expected_output) def testNestedWindowSparse(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) dataset = dataset_ops.Dataset.range(10).map(_sparse).window( size=4, shift=2, drop_remainder=True).flat_map(lambda x: x.batch(batch_size=4)).window( size=3, shift=1, drop_remainder=True).flat_map(lambda x: x.batch(batch_size=3)) expected_output = [ sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]], values=[0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7], dense_shape=[3, 4, 1]), sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [2, 0, 0], [2, 1, 0], [2, 2, 0], [2, 3, 0]], values=[2, 3, 4, 5, 4, 5, 6, 7, 6, 7, 8, 9], dense_shape=[3, 4, 1]) ] self.assertDatasetProduces(dataset, expected_output=expected_output) def testWindowShapeError(self): def generator(): yield [1.0, 2.0, 3.0] yield [4.0, 5.0, 6.0] yield [7.0, 8.0, 9.0, 10.0] dataset = dataset_ops.Dataset.from_generator( generator, dtypes.float32, output_shapes=[None]).window( size=3, shift=1).flat_map(lambda x: x.batch(batch_size=3)) self.assertDatasetProduces( dataset, expected_error=( errors.InvalidArgumentError, r"Cannot 
batch tensors with different shapes in component 0. " r"First element had shape \[3\] and element 2 had shape \[4\].")) def testWindowIgnoreErrors(self): input_values = np.float32([1., np.nan, 2., np.nan, 3.]) dataset = dataset_ops.Dataset.from_tensor_slices(input_values).map( lambda x: array_ops.check_numerics(x, "message")).window( size=2, shift=2, stride=2, drop_remainder=True).flat_map(lambda x: x.batch(batch_size=2)) self.assertDatasetProduces( dataset, expected_output=[np.float32([1., 2.]), np.float32([2., 3.])]) def testNestedOutput(self): if not context.executing_eagerly(): self.skipTest("self.evaluate() does not work with a dataset") dataset = dataset_ops.Dataset.range(100) dataset = dataset_ops.Dataset.zip((dataset, dataset)).window(10) for i, nested_dataset in enumerate(dataset): x, y = nested_dataset self.assertDatasetProduces(x, range(i*10, (i+1)*10)) self.assertDatasetProduces(y, range(i*10, (i+1)*10)) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/window_test.py
# -*- coding: utf-8 -*- # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for `tf.data.Dataset.padded_batch()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import string_ops from tensorflow.python.platform import test from tensorflow.python.util import compat def _random_seq_lens(count): return np.random.randint(20, size=(count,)).astype(np.int32) @test_util.run_all_in_graph_and_eager_modes class PaddedBatchTest(test_base.DatasetTestBase, parameterized.TestCase): @parameterized.named_parameters( ('default_padding', _random_seq_lens(32), 4, [-1], False), ('constant_padding', _random_seq_lens(32), 4, [25], False), ('uneven_with_remainder', _random_seq_lens(34), 4, [-1], False), ('uneven_without_remainder', _random_seq_lens(34), 4, [-1], True), ) def testPaddedBatchDataset(self, seq_lens, batch_size, padded_shapes, drop_remainder): """Tests the padded batch dataset logic for various input configurations. 
Args:
      seq_lens: the input sequence lengths
      batch_size: the batch size
      padded_shapes: the padded shapes to use
      drop_remainder: whether a smaller final batch should be produced if the
        batch size does not evenly divide the number of inputs
    """
    dataset = dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
        lambda x: array_ops.fill([x], x)).padded_batch(
            batch_size=batch_size,
            drop_remainder=drop_remainder,
            padded_shapes=padded_shapes)

    num_full_batches = len(seq_lens) // batch_size

    get_next = self.getNext(dataset)
    for i in range(num_full_batches):
      result = self.evaluate(get_next())
      padded_len = padded_shapes[0]
      if padded_len is None or padded_len == -1:
        padded_len = np.max(result) if result.size > 0 else 0
      self.assertEqual((batch_size, padded_len), result.shape)
      for j in range(batch_size):
        seq_len = seq_lens[(i * batch_size) + j]
        self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
        self.assertAllEqual(result[j, seq_len:],
                            [0] * (padded_len - seq_len))

    if not drop_remainder and len(seq_lens) % batch_size > 0:
      result = self.evaluate(get_next())
      padded_len = np.max(result) if result.size > 0 else 0
      self.assertEqual((len(seq_lens) % batch_size, padded_len), result.shape)
      for j in range(len(seq_lens) % batch_size):
        seq_len = seq_lens[num_full_batches * batch_size + j]
        self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
        self.assertAllEqual(result[j, seq_len:],
                            [0] * (padded_len - seq_len))

    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @test_util.run_deprecated_v1
  def testPaddedBatchShortPadding(self):
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(
            [6, 5, 5, 5, 5]).map(lambda x: array_ops.fill([x], x)).padded_batch(
                batch_size=4, padded_shapes=[5]))
    self.assertDatasetProduces(
        dataset, expected_error=(errors.DataLossError, ''))

  def testPaddedBatchEmptyTensors(self):
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(
            [0, 0, 0, 0]).map(lambda x: array_ops.fill([x], x)).padded_batch(
                batch_size=4, padded_shapes=[-1]))
    self.assertDatasetProduces(dataset, expected_output=[[[], [], [], []]])

  def testPaddedBatchDatasetNonDefaultPadding(self):

    def fill_tuple(x):
      filled = array_ops.fill([x], x)
      return (filled, string_ops.as_string(filled))

    random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(random_seq_lens).map(fill_tuple)
        .padded_batch(
            4, padded_shapes=([-1], [-1]), padding_values=(-1, '<end>')))

    get_next = self.getNext(dataset)
    for i in range(8):
      result = self.evaluate(get_next())
      padded_len = np.max(result[0])
      self.assertEqual((4, padded_len), result[0].shape)
      self.assertEqual((4, padded_len), result[1].shape)
      for j in range(4):
        seq_len = random_seq_lens[(i * 4) + j]
        self.assertAllEqual(result[0][j, :seq_len], [seq_len] * seq_len)
        self.assertAllEqual(result[0][j, seq_len:],
                            [-1] * (padded_len - seq_len))
        self.assertAllEqual(result[1][j, :seq_len],
                            [compat.as_bytes(str(seq_len))] * seq_len)
        self.assertAllEqual(result[1][j, seq_len:],
                            [b'<end>'] * (padded_len - seq_len))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  def testPaddedBatchDatasetUnicode(self):
    # See GitHub issue 16149
    def generator():
      data = [[u'Простой', u'тест', u'юникода'],
              [u'никогда', u'не', u'бывает', u'простым']]
      for seq in data:
        yield seq, [0, 1, 2, 3]

    dataset = dataset_ops.Dataset.from_generator(
        generator, (dtypes.string, dtypes.int32),
        (tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None])))
    padded_dataset =
dataset.padded_batch( 2, padded_shapes=([None], [None]), padding_values=('', 0)) next_element = self.getNext(padded_dataset) self.evaluate(next_element()) # NOTE: This test is specific to graph mode and is skipped in eager mode. @test_util.run_deprecated_v1 def testSkipEagerPaddedBatchDatasetShapeSpecifications(self): int_placeholder = array_ops.placeholder(dtypes.int32) float_placeholder = array_ops.placeholder(dtypes.float32) string_placeholder = array_ops.placeholder(dtypes.string) input_dataset = dataset_ops.Dataset.from_tensors( (int_placeholder, float_placeholder, string_placeholder)) # Test different ways of specifying the `padded_shapes` argument. dynamic_padding_from_tensor_shapes = input_dataset.padded_batch( 32, padded_shapes=(tensor_shape.TensorShape([None]), tensor_shape.TensorShape([None, None]), tensor_shape.TensorShape([37]))) dynamic_padding_from_lists = input_dataset.padded_batch( 32, padded_shapes=([None], [None, None], [37])) dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch( 32, padded_shapes=([-1], [-1, -1], [37])) dynamic_padding_from_tensors = input_dataset.padded_batch( 32, padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64), constant_op.constant([-1, -1], dtype=dtypes.int64), constant_op.constant([37], dtype=dtypes.int64))) for dataset in [ dynamic_padding_from_tensor_shapes, dynamic_padding_from_lists, dynamic_padding_from_lists_with_minus_one, dynamic_padding_from_tensors ]: dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset) self.assertEqual([None, None], dataset_output_shapes[0].as_list()) self.assertEqual([None, None, None], dataset_output_shapes[1].as_list()) self.assertEqual([None, 37], dataset_output_shapes[2].as_list()) def testPaddedBatchSparseError(self): def _map_fn(i): return sparse_tensor.SparseTensorValue( indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i with self.assertRaises(TypeError): _ = dataset_ops.Dataset.range(10).map(_map_fn).padded_batch(10) def testPaddedBatchShapeError(self): with self.assertRaisesRegexp( ValueError, r'The padded shape \(1,\) is not compatible with the ' r'corresponding input component shape \(\).'): _ = dataset_ops.Dataset.range(10).padded_batch(5, padded_shapes=[1]) with self.assertRaisesRegexp( ValueError, r'The padded shape \(1,\) is not compatible with the ' r'corresponding input component shape \(3,\).'): _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch( 5, padded_shapes=[1]) with self.assertRaisesRegexp( ValueError, r'Padded shape .* must be a 1-D tensor ' r'of tf.int64 values, but its shape was \(2, 2\).'): _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch( 5, padded_shapes=[[1, 1], [1, 1]]) with self.assertRaisesRegexp( TypeError, r'Padded shape .* must be a 1-D tensor ' r'of tf.int64 values, but its element type was float32.'): _ = dataset_ops.Dataset.from_tensors([1, 2, 3]).padded_batch( 5, padded_shapes=constant_op.constant([1.5, 2., 3.])) with self.assertRaisesRegexp( ValueError, r'The padded shape \(1,\) is not compatible with the ' r'corresponding input component shape \(\).'): shape_as_tensor = constant_op.constant([1], dtype=dtypes.int64) _ = dataset_ops.Dataset.range(10).padded_batch( 5, padded_shapes=shape_as_tensor) # NOTE: This test is specific to graph mode and is skipped in eager mode. 
@test_util.run_deprecated_v1 def testSkipEagerPaddedBatchShapeError(self): with self.assertRaisesRegexp( ValueError, r'The padded shape \((\?|None), (\?|None)\) is not compatible with the ' r'corresponding input component shape \(\).'): shape_as_tensor = array_ops.placeholder(dtypes.int64, shape=[2]) _ = dataset_ops.Dataset.range(10).padded_batch( 5, padded_shapes=shape_as_tensor) if __name__ == '__main__': test.main()
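
# --- Editor's note (appended sketch; not part of the original test file) ---
# Condensed illustration of the padding behavior tested above, using this
# module's existing imports: with a padded shape of [-1] each batch is padded
# to its own longest element, and numeric components default to zero padding.
def _padded_batch_usage_sketch():
  ds = dataset_ops.Dataset.from_tensor_slices([1, 3, 2, 4])
  ds = ds.map(lambda x: array_ops.fill([x], x))
  # First batch -> [[1, 0, 0], [3, 3, 3]];
  # second batch -> [[2, 2, 0, 0], [4, 4, 4, 4]].
  return ds.padded_batch(2, padded_shapes=[-1])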
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/padded_batch_test.py
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import numpy as np

from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test


class BatchTest(test_base.DatasetTestBase, parameterized.TestCase):

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              count=[0, 28], batch_size=[14, 15],
              drop_remainder=[True, False])))
  def testBasic(self, count, batch_size, drop_remainder):
    """Tests the batch dataset logic for various input configurations.

    Args:
      count: the number of input elements
      batch_size: the batch size
      drop_remainder: whether a smaller final batch should be produced if the
        batch size does not evenly divide the number of inputs
    """
    # The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
    # RepeatDataset(count) -> BatchDataset(batch_size).
components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7)) def _map_fn(x, y, z): return math_ops.square(x), math_ops.square(y), math_ops.square(z) dataset = dataset_ops.Dataset.from_tensor_slices(components).map( _map_fn).repeat(count).batch(batch_size, drop_remainder) get_next = self.getNext(dataset) if drop_remainder: dim0 = batch_size else: dim0 = None self.assertEqual( [ts.as_list() for ts in nest.flatten( dataset_ops.get_legacy_output_shapes(dataset))], [[dim0] + list(c.shape[1:]) for c in components]) num_full_batches = (count * 7) // batch_size for i in range(num_full_batches): result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range(batch_size): self.assertAllEqual(component[(i * batch_size + j) % 7]**2, result_component[j]) if not drop_remainder and (count * 7) % batch_size > 0: result = self.evaluate(get_next()) for component, result_component in zip(components, result): for j in range((count * 7) % batch_size): self.assertAllEqual( component[(num_full_batches * batch_size + j) % 7]**2, result_component[j]) with self.assertRaises(errors.OutOfRangeError): result = self.evaluate(get_next()) @combinations.generate(test_base.default_test_combinations()) def testInvalidBatchSize(self): with self.assertRaises(errors.InvalidArgumentError): dataset = (dataset_ops.Dataset.range(10).batch(0)) self.evaluate(dataset._variant_tensor) @combinations.generate(test_base.default_test_combinations()) def testDataset(self): def map_fn(i): return dataset_ops.Dataset.from_tensors(i) dataset = dataset_ops.Dataset.range(10).map(map_fn).batch(5) dataset = dataset.map(lambda x: x) dataset = dataset.unbatch().flat_map(lambda x: x) self.assertDatasetProduces(dataset, expected_output=range(10)) def testSparse(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5) expected_output = [ sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]], values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4], dense_shape=[5, 1]) for i in range(2) ] self.assertDatasetProduces(dataset, expected_output=expected_output) @combinations.generate(test_base.default_test_combinations()) def testSparseWithDifferentDenseShapes(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=array_ops.expand_dims( math_ops.range(i, dtype=dtypes.int64), 1), values=array_ops.fill([math_ops.cast(i, dtypes.int32)], i), dense_shape=[i]) dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5) expected_output = [] for i in range(2): expected_indices = [] expected_outputs = [] for j in range(5): for k in range(i * 5 + j): expected_indices.append([j, k]) expected_outputs.append(i * 5 + j) expected_output.append( sparse_tensor.SparseTensorValue( indices=expected_indices, values=expected_outputs, dense_shape=[5, (i + 1) * 5 - 1])) self.assertDatasetProduces(dataset, expected_output=expected_output) @combinations.generate(test_base.default_test_combinations()) def testSparseNested(self): def _sparse(i): return sparse_tensor.SparseTensorValue( indices=[[0]], values=(i * [1]), dense_shape=[1]) dataset = dataset_ops.Dataset.range(10).map(_sparse).batch(5).batch(2) expected_output = [ sparse_tensor.SparseTensorValue( indices=[[0, 0, 0], [0, 1, 0], [0, 2, 0], [0, 3, 0], [0, 4, 0], [1, 0, 0], [1, 1, 0], [1, 2, 0], [1, 3, 0], [1, 4, 0]], values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dense_shape=[2, 
5, 1]) ] self.assertDatasetProduces(dataset, expected_output=expected_output) @combinations.generate(test_base.default_test_combinations()) def testShapeError(self): def generator(): yield [1.0, 2.0, 3.0] yield [4.0, 5.0, 6.0] yield [7.0, 8.0, 9.0, 10.0] dataset = ( dataset_ops.Dataset.from_generator( generator, dtypes.float32, output_shapes=[None]).batch(3)) self.assertDatasetProduces( dataset, expected_error=( errors.InvalidArgumentError, r'Cannot batch tensors with different shapes in component 0. First ' r'element had shape \[3\] and element 2 had shape \[4\].')) @combinations.generate(test_base.default_test_combinations()) def testRagged(self): def _ragged(i): return ragged_tensor.RaggedTensor.from_tensor(i * [[1]]) dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5) expected_output = [ ragged_factory_ops.constant([[[0]], [[1]], [[2]], [[3]], [[4]]]), ragged_factory_ops.constant([[[5]], [[6]], [[7]], [[8]], [[9]]]) ] self.assertDatasetProduces(dataset, expected_output=expected_output) @combinations.generate(test_base.default_test_combinations()) def testRaggedWithDifferentShapes(self): dataset = dataset_ops.Dataset.range(10).map(ragged_math_ops.range).batch(5) expected_output = [ ragged_concat_ops.stack([ragged_math_ops.range(i) for i in range(5)]), ragged_concat_ops.stack( [ragged_math_ops.range(i) for i in range(5, 10)]) ] self.assertDatasetProduces(dataset, expected_output=expected_output) @combinations.generate(test_base.default_test_combinations()) def testRaggedNested(self): def _ragged(i): return ragged_tensor.RaggedTensor.from_tensor(i * [[1]]) dataset = dataset_ops.Dataset.range(10).map(_ragged).batch(5).batch(2) expected_output = [ ragged_factory_ops.constant([[[[0]], [[1]], [[2]], [[3]], [[4]]], [[[5]], [[6]], [[7]], [[8]], [[9]]]]) ] self.assertDatasetProduces(dataset, expected_output=expected_output) if __name__ == '__main__': test.main()
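
# --- Editor's note (appended sketch; not part of the original test file) ---
# Condensed illustration of `drop_remainder`, which testBasic above checks via
# both the emitted batches and the static shape, using this module's imports:
# dropping the remainder makes the batch dimension statically known.
def _batch_usage_sketch():
  ds = dataset_ops.Dataset.range(10).batch(3)
  ds_static = dataset_ops.Dataset.range(10).batch(3, drop_remainder=True)
  # ds yields [0 1 2], [3 4 5], [6 7 8], [9] with output shape [None];
  # ds_static omits the partial batch [9] and has output shape [3].
  return ds, ds_static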
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/kernel_tests/batch_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.filter()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.benchmarks import benchmark_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.ops import array_ops # TODO(b/119837791): Add eager benchmarks. class FilterBenchmark(benchmark_base.DatasetBenchmarkBase): """Benchmarks for `tf.data.Dataset.filter()`.""" def _benchmark(self, predicate, name): dataset = ( dataset_ops.Dataset.from_tensors(True).repeat(None).filter(predicate)) self.run_and_report_benchmark(dataset, num_elements=100000, name=name) def benchmark_simple_function(self): self._benchmark(array_ops.identity, "simple_function") def benchmark_return_component_optimization(self): self._benchmark(lambda x: x, "return_component") if __name__ == "__main__": benchmark_base.test.main()
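
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# The two benchmarks above compare a predicate that wraps its input in an
# identity op against one that returns the input component directly, which
# tf.data can short-circuit. Sketch using this module's existing imports.
def _filter_predicates_sketch():
  ds = dataset_ops.Dataset.from_tensors(True).repeat(None)
  via_identity_op = ds.filter(array_ops.identity)  # adds an identity op
  via_short_circuit = ds.filter(lambda x: x)       # returns the component as-is
  return via_identity_op, via_short_circuit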
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/filter_benchmark.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.list_files()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from os import path from os import makedirs import shutil import time import tempfile import numpy as np from tensorflow.python.client import session from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.platform import test class ListFilesBenchmark(test.Benchmark): """Benchmarks for `tf.data.Dataset.list_files()`.""" def benchmark_nested_directories(self): tmp_dir = tempfile.mkdtemp() width = 1024 depth = 16 for i in range(width): for j in range(depth): new_base = path.join(tmp_dir, str(i), *[str(dir_name) for dir_name in range(j)]) makedirs(new_base) child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log'] for f in child_files: filename = path.join(new_base, f) open(filename, 'w').close() patterns = [ path.join(tmp_dir, path.join(*['**' for _ in range(depth)]), suffix) for suffix in ['*.txt', '*.log'] ] deltas = [] iters = 3 for _ in range(iters): with ops.Graph().as_default(): dataset = dataset_ops.Dataset.list_files(patterns) options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False dataset = dataset.with_options(options) next_element = dataset.make_one_shot_iterator().get_next() with session.Session() as sess: sub_deltas = [] while True: try: start = time.time() sess.run(next_element) end = time.time() sub_deltas.append(end - start) except errors.OutOfRangeError: break deltas.append(sub_deltas) median_deltas = np.median(deltas, axis=0) self.report_benchmark( iters=iters, wall_time=np.sum(median_deltas), extras={ 'read first file:': median_deltas[0], 'read second file:': median_deltas[1], 'avg time for reading %d more filenames:' % (len(median_deltas) - 2): np.average(median_deltas[2:]) }, name='nested_directory(%d*%d)' % (width, depth)) shutil.rmtree(tmp_dir, ignore_errors=True) if __name__ == '__main__': test.main()
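
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# Minimal sketch of the `Dataset.list_files` call being timed above; the
# directory layout and pattern here are hypothetical. Uses this module's
# existing imports.
def _list_files_sketch(tmp_dir):
  # One '*' per directory level; matches e.g. <tmp_dir>/3/0/1/c.txt.
  pattern = path.join(tmp_dir, '*', '*', '*', '*.txt')
  return dataset_ops.Dataset.list_files(pattern)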
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/list_files_benchmark.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.batch()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.benchmarks import benchmark_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.framework import sparse_tensor class BatchBenchmark(benchmark_base.DatasetBenchmarkBase): """Benchmarks for `tf.data.Dataset.batch()`.""" def benchmark_batch_sparse(self): non_zeros_per_row_values = [0, 1, 5, 10, 100] batch_size_values = [1, 32, 64, 128, 1024] for non_zeros_per_row in non_zeros_per_row_values: tensor = sparse_tensor.SparseTensor( indices=np.arange(non_zeros_per_row, dtype=np.int64)[:, np.newaxis], values=np.arange(non_zeros_per_row, dtype=np.int64), dense_shape=[1000]) for batch_size in batch_size_values: dataset = dataset_ops.Dataset.from_tensors(tensor).repeat().batch( batch_size) self.run_and_report_benchmark( dataset, num_elements=100000 // batch_size, iters=1, name="sparse_num_elements_%d_batch_size_%d" % (non_zeros_per_row, batch_size)) def benchmark_batch_dense(self): for element_exp in [10, 12, 14, 16, 18, 20, 22]: for batch_exp in [3, 6, 9]: for parallel_copy in [True, False]: element_size = 1 << element_exp batch_size = 1 << batch_exp dataset = dataset_ops.Dataset.from_tensors( np.random.rand(element_size)).repeat().batch(batch_size) options = dataset_ops.Options() options.experimental_optimization.parallel_batch = parallel_copy dataset = dataset.with_options(options) tag = "_parallel" if parallel_copy else "" self.run_and_report_benchmark( dataset, num_elements=(1 << (22 - batch_exp - element_exp // 2)), iters=1, name="batch_element_size_%d_batch_size_%d%s" % (element_size, batch_size, tag)) if __name__ == "__main__": benchmark_base.test.main()
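
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# The dense benchmark above toggles the parallel-batch optimization through
# dataset options; a condensed sketch of that toggle, assuming this build
# exposes `experimental_optimization.parallel_batch` (as the benchmark does).
def _parallel_batch_sketch():
  dataset = dataset_ops.Dataset.from_tensors(
      np.random.rand(1024)).repeat().batch(64)
  options = dataset_ops.Options()
  options.experimental_optimization.parallel_batch = True
  return dataset.with_options(options)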
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/batch_benchmark.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities for tf.data benchmarking functionality."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.platform import test


# TODO(b/119837791): Add eager benchmarks.
class DatasetBenchmarkBase(test.Benchmark):
  """Base class for dataset benchmarks."""

  def run_benchmark(self, dataset, num_elements, iters=1, warmup=True):
    """Benchmarks the dataset.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through each
        benchmark iteration.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.

    Returns:
      A float, representing the per-element wall time of the dataset in
      seconds. This is the median time (with respect to `iters`) it takes for
      the dataset to go through `num_elements` elements, divided by
      `num_elements`.
    """
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    dataset = dataset.with_options(options)
    # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
    # the overhead of multiple `session.run()` calls. Note that this relies on
    # the underlying implementation of `skip`: if it is optimized in the
    # future, we will have to change this code.
    dataset = dataset.skip(num_elements - 1)
    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()
    next_element = nest.flatten(next_element)[0]

    deltas = []
    for _ in range(iters):
      with session.Session() as sess:
        if warmup:
          # Run once to warm up the session caches.
          sess.run(iterator.initializer)
          sess.run(next_element)

        sess.run(iterator.initializer)
        start = time.time()
        sess.run(next_element.op)
        end = time.time()
      deltas.append(end - start)
    return np.median(deltas) / float(num_elements)

  def run_and_report_benchmark(self,
                               dataset,
                               num_elements,
                               name,
                               iters=5,
                               extras=None,
                               warmup=True):
    # Measure the per-element wall time.
    wall_time = self.run_benchmark(dataset, num_elements, iters, warmup)

    if extras is None:
      extras = {}
    extras["num_elements"] = num_elements
    self.report_benchmark(
        wall_time=wall_time, iters=iters, name=name, extras=extras)
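
# --- Editor's note (appended sketch; not part of the original module) ---
# Condensed form of the timing pattern used in `run_benchmark` above, using
# this module's imports: skipping num_elements - 1 elements keeps the
# iteration inside the C++ runtime, so one `session.run` times the whole pass.
def _skip_timing_sketch(dataset, num_elements):
  dataset = dataset.skip(num_elements - 1)
  iterator = dataset_ops.make_initializable_iterator(dataset)
  next_element = nest.flatten(iterator.get_next())[0]
  with session.Session() as sess:
    sess.run(iterator.initializer)
    start = time.time()
    sess.run(next_element.op)  # pulls all skipped elements through C++
    return (time.time() - start) / float(num_elements)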
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/benchmark_base.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test utilities for tf.data benchmarking functionality.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import timeit import numpy as np from tensorflow.python.client import session from tensorflow.python.data.experimental.ops import sleep from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.platform import test class MetaBenchmark(test.Benchmark): """Benchmark that compares various ways of running tf.data benchmarks.""" # Note that each of these benchmarks is a separate method so that we can # run them independently and collect a performance profile. def setup_fast_dataset(self): self.num_reps = 15 self.iters = 100000 options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False return dataset_ops.Dataset.range(10000**2).with_options(options) def benchmark_fast_dataset_with_only_cpp_iterations(self): dataset = self.setup_fast_dataset() self.run_benchmark_with_only_cpp_iterations(dataset) def benchmark_fast_dataset_with_session_run(self): dataset = self.setup_fast_dataset() self.run_benchmark_with_session_run(dataset) def benchmark_fast_dataset_with_session_callable(self): dataset = self.setup_fast_dataset() self.run_benchmark_with_session_run(dataset, make_callable=True) def benchmark_fast_dataset_in_eager(self): with context.eager_mode(): dataset = self.setup_fast_dataset() self.run_benchmark_in_eager(dataset) def setup_slow_dataset(self): dataset = self.setup_fast_dataset() self.iters = 1000 # sleep for 1e-3s per iteration return dataset.apply(sleep.sleep(1000)) def benchmark_slow_dataset_with_only_cpp_iterations(self): dataset = self.setup_slow_dataset() self.run_benchmark_with_only_cpp_iterations(dataset) def benchmark_slow_dataset_with_session_run(self): dataset = self.setup_slow_dataset() self.run_benchmark_with_session_run(dataset) def benchmark_slow_dataset_with_session_callable(self): dataset = self.setup_slow_dataset() self.run_benchmark_with_session_run(dataset, make_callable=True) def benchmark_slow_dataset_in_eager(self): with context.eager_mode(): dataset = self.setup_slow_dataset() self.run_benchmark_in_eager(dataset) def report(self, deltas): # Each `delta` is the time taken for `self.iters` iterations. Divide by the # number of iterations here to get per-element iteration time. deltas = np.array(deltas) / self.iters # Discard the first 5 results from "warming up" the session. 
deltas = deltas[5:] median = np.median(deltas) mean = np.mean(deltas) min_val = np.min(deltas) max_val = np.max(deltas) extras = { "iters_per_second": 1 / median, "median": median, "mean": mean, "min": min_val, "max": max_val, "num_reps": self.num_reps - 5, } self.report_benchmark(wall_time=median, iters=self.iters, extras=extras) def run_benchmark_in_eager(self, dataset): deltas = [] for _ in range(self.num_reps): iterator = iter(dataset) deltas.append(timeit.timeit(lambda: next(iterator), number=self.iters)) # pylint: disable=cell-var-from-loop self.report(deltas) def run_benchmark_with_session_run(self, dataset, make_callable=False): iterator = dataset_ops.make_initializable_iterator(dataset) next_element = iterator.get_next() with session.Session() as sess: deltas = [] for _ in range(self.num_reps): if make_callable: get_next_element = sess.make_callable(next_element) else: # Note: session.run(next_element.op) is more performant than # session.run(next_element) because we avoid the cost of copying the # tensor from C++ to python. get_next_element = lambda: sess.run(next_element.op) sess.run(iterator.initializer) deltas.append(timeit.timeit(get_next_element, number=self.iters)) self.report(deltas) def run_benchmark_with_only_cpp_iterations(self, dataset): """Benchmarks the dataset with the iterations performed in C++.""" # NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding # the overhead of multiple `session.run()` calls. Note that this relies on # the underlying implementation of `skip`: if it is optimized in the future, # we will have to change this code. dataset = dataset.skip(self.iters - 1) iterator = dataset_ops.make_initializable_iterator(dataset) next_element = iterator.get_next() with session.Session() as sess: deltas = [] for _ in range(self.num_reps): sess.run(iterator.initializer) deltas.append( timeit.timeit(lambda: sess.run(next_element.op), number=1)) self.report(deltas) if __name__ == "__main__": test.main()
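
# --- Editor's note (appended sketch; not part of the original module) ---
# Side-by-side form of the fetch-cost point made in the comment above, using
# this module's imports: session.run(next_element.op) runs the op without
# copying the produced tensor back to Python, while session.run(next_element)
# pays for that copy.
def _fetch_cost_sketch():
  iterator = dataset_ops.make_initializable_iterator(
      dataset_ops.Dataset.range(100))
  next_element = iterator.get_next()
  with session.Session() as sess:
    sess.run(iterator.initializer)
    value = sess.run(next_element)  # fetches the value (C++ -> Python copy)
    sess.run(next_element.op)       # runs the op only; no value is returned
    return value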
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/meta_benchmark.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops


# TODO(b/119837791): Add eager benchmarks.
class MapBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for `tf.data.Dataset.map()`."""

  def benchmark_chain_of_maps(self):

    def benchmark_helper(chain_length, map_fn, use_inter_op_parallelism,
                         label):
      dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
      for _ in range(chain_length):
        dataset = dataset_ops.MapDataset(
            dataset, map_fn, use_inter_op_parallelism=use_inter_op_parallelism)
      self.run_and_report_benchmark(
          dataset,
          num_elements=10000,
          name="chain_length_%d%s" % (chain_length, label))

    chain_lengths = [0, 1, 2, 5, 10, 20, 50]
    for chain_length in chain_lengths:
      benchmark_helper(chain_length, lambda x: x + 1, True, "")
      benchmark_helper(chain_length, lambda x: x + 1, False,
                       "_single_threaded")
      benchmark_helper(chain_length, lambda x: x, True, "_short_circuit")

  def benchmark_map_fan_out(self):
    fan_outs = [1, 2, 5, 10, 20, 50, 100]

    def benchmark_helper(fan_out, map_fn, use_inter_op_parallelism, label):
      dataset = dataset_ops.Dataset.from_tensors(
          tuple(0 for _ in range(fan_out))).repeat(None)
      dataset = dataset_ops.MapDataset(
          dataset, map_fn, use_inter_op_parallelism=use_inter_op_parallelism)
      self.run_and_report_benchmark(
          dataset,
          num_elements=10000,
          name="fan_out_%d%s" % (fan_out, label))

    for fan_out in fan_outs:
      benchmark_helper(fan_out, lambda *xs: [x + 1 for x in xs], True, "")
      benchmark_helper(fan_out, lambda *xs: [x + 1 for x in xs], False,
                       "_single_threaded")
      benchmark_helper(fan_out, lambda *xs: xs, True, "_short_circuit")


if __name__ == "__main__":
  benchmark_base.test.main()
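
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# Public-API counterpart of the map variants measured above, using this
# module's imports: a map that does real per-element work versus an identity
# map that is a short-circuit candidate.
def _map_variants_sketch():
  ds = dataset_ops.Dataset.from_tensors(0).repeat(None)
  with_work = ds.map(lambda x: x + 1)  # per-element computation
  short_circuit = ds.map(lambda x: x)  # identity; can be short-circuited
  return with_work, short_circuit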
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/map_benchmark.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.range()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.data.benchmarks import benchmark_base from tensorflow.python.data.ops import dataset_ops class RangeBenchmark(benchmark_base.DatasetBenchmarkBase): """Benchmarks for `tf.data.Dataset.range()`.""" def benchmark_range(self): for modeling_enabled in [False, True]: num_elements = 10000000 if modeling_enabled else 50000000 options = dataset_ops.Options() options.experimental_optimization.autotune = modeling_enabled dataset = dataset_ops.Dataset.range(num_elements) dataset = dataset.with_options(options) self.run_and_report_benchmark( dataset, num_elements=num_elements, name="modeling_%s" % ("on" if modeling_enabled else "off")) if __name__ == "__main__": benchmark_base.test.main()
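
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# The benchmark above flips autotuning ("modeling") on and off through dataset
# options; a condensed sketch of that toggle, using this module's imports.
def _autotune_toggle_sketch(enabled):
  options = dataset_ops.Options()
  options.experimental_optimization.autotune = enabled
  return dataset_ops.Dataset.range(10000).with_options(options)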
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/range_benchmark.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Benchmarks for `tf.data.Dataset.from_tensor_slices()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.data.benchmarks import benchmark_base from tensorflow.python.data.ops import dataset_ops # TODO(b/119837791): Add eager benchmarks. class FromTensorSlicesBenchmark(benchmark_base.DatasetBenchmarkBase): """Benchmarks for `tf.data.Dataset.from_tensor_slices()`.""" def benchmark_slice_repeat_batch(self): input_size = 10000 batch_size = 100 num_epochs = 100 num_elements = input_size * num_epochs // batch_size input_data = np.random.randn(input_size) dataset = ( dataset_ops.Dataset.from_tensor_slices(input_data).repeat( num_epochs).batch(batch_size)) self.run_and_report_benchmark( dataset, num_elements=num_elements, name="slice_repeat_batch_input_%d_batch_%d" % (input_size, batch_size)) def benchmark_reshape_slice_repeat(self): input_size = 10000 reshape_dim = [100, 100] num_epochs = 100 num_elements = num_epochs * reshape_dim[0] input_data = np.random.randn(input_size) dataset = ( dataset_ops.Dataset.from_tensor_slices( input_data.reshape(*reshape_dim)).repeat(num_epochs)) self.run_and_report_benchmark( dataset, num_elements=num_elements, name="reshape_slice_repeat_input_%d" % input_size, ) def benchmark_slice_batch_cache_repeat(self): input_size = 10000 batch_size = 100 num_epochs = 100 num_elements = input_size * num_epochs // batch_size input_data = np.random.randn(input_size) dataset = ( dataset_ops.Dataset.from_tensor_slices(input_data).batch( batch_size).cache().repeat(num_epochs)) self.run_and_report_benchmark( dataset, num_elements=num_elements, name="slice_batch_cache_repeat_input_%d_batch_%d" % (input_size, batch_size)) if __name__ == "__main__": benchmark_base.test.main()
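
# --- Editor's note (appended sketch; not part of the original benchmark) ---
# The slice_batch_cache_repeat pipeline above places `cache()` after `batch()`,
# so the first epoch materializes batches and later epochs replay them from
# memory. Condensed sketch using this module's existing imports.
def _cache_after_batch_sketch():
  input_data = np.random.randn(10000)
  return (dataset_ops.Dataset.from_tensor_slices(input_data)
          .batch(100)
          .cache()  # filled on the first pass, reused afterwards
          .repeat(100))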
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/benchmarks/from_tensor_slices_benchmark.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for reader Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util.tf_export import tf_export


# TODO(b/64974358): Increase default buffer size to 256 MB.
_DEFAULT_READER_BUFFER_SIZE_BYTES = 256 * 1024  # 256 KB


def _create_or_validate_filenames_dataset(filenames):
  """Creates (or validates) a dataset of filenames.

  Args:
    filenames: Either a list or dataset of filenames. If it is a list, it is
      converted to a dataset. If it is a dataset, its type and shape are
      validated.

  Returns:
    A dataset of filenames.
  """
  if isinstance(filenames, dataset_ops.DatasetV2):
    if dataset_ops.get_legacy_output_types(filenames) != dtypes.string:
      raise TypeError(
          "`filenames` must be a `tf.data.Dataset` of `tf.string` elements.")
    if not dataset_ops.get_legacy_output_shapes(filenames).is_compatible_with(
        tensor_shape.TensorShape([])):
      raise TypeError(
          "`filenames` must be a `tf.data.Dataset` of scalar `tf.string` "
          "elements.")
  else:
    filenames = ops.convert_to_tensor(filenames, dtype=dtypes.string)
    filenames = array_ops.reshape(filenames, [-1], name="flat_filenames")
    filenames = dataset_ops.DatasetV2.from_tensor_slices(filenames)

  return filenames


def _create_dataset_reader(dataset_creator, filenames,
                           num_parallel_reads=None):
  """Creates a dataset that reads the given files using the given reader.

  Args:
    dataset_creator: A function that takes in a single file name and returns a
      dataset.
    filenames: A `tf.data.Dataset` containing one or more filenames.
    num_parallel_reads: The number of parallel reads we should do.

  Returns:
    A `Dataset` that reads data from `filenames`.
""" def read_one_file(filename): filename = ops.convert_to_tensor(filename, dtypes.string, name="filename") return dataset_creator(filename) if num_parallel_reads is None: return filenames.flat_map(read_one_file) elif num_parallel_reads == dataset_ops.AUTOTUNE: return filenames.interleave( read_one_file, num_parallel_calls=num_parallel_reads) else: return ParallelInterleaveDataset( filenames, read_one_file, cycle_length=num_parallel_reads, block_length=1, sloppy=False, buffer_output_elements=None, prefetch_input_elements=None) class _TextLineDataset(dataset_ops.DatasetSource): """A `Dataset` comprising records from one or more text files.""" def __init__(self, filenames, compression_type=None, buffer_size=None): """Creates a `TextLineDataset`. Args: filenames: A `tf.string` tensor containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes to buffer. A value of 0 results in the default buffering values chosen based on the compression type. """ self._filenames = filenames self._compression_type = convert.optional_param_to_tensor( "compression_type", compression_type, argument_default="", argument_dtype=dtypes.string) self._buffer_size = convert.optional_param_to_tensor( "buffer_size", buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES) variant_tensor = gen_dataset_ops.text_line_dataset( self._filenames, self._compression_type, self._buffer_size) super(_TextLineDataset, self).__init__(variant_tensor) @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) @tf_export("data.TextLineDataset", v1=[]) class TextLineDatasetV2(dataset_ops.DatasetSource): """A `Dataset` comprising lines from one or more text files.""" def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None): """Creates a `TextLineDataset`. Args: filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes to buffer. A value of 0 results in the default buffering values chosen based on the compression type. num_parallel_reads: (Optional.) A `tf.int64` scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If `None`, files will be read sequentially. 
""" filenames = _create_or_validate_filenames_dataset(filenames) self._filenames = filenames self._compression_type = compression_type self._buffer_size = buffer_size def creator_fn(filename): return _TextLineDataset(filename, compression_type, buffer_size) self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads) variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access super(TextLineDatasetV2, self).__init__(variant_tensor) @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) @tf_export(v1=["data.TextLineDataset"]) class TextLineDatasetV1(dataset_ops.DatasetV1Adapter): """A `Dataset` comprising lines from one or more text files.""" def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None): wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size, num_parallel_reads) super(TextLineDatasetV1, self).__init__(wrapped) __init__.__doc__ = TextLineDatasetV2.__init__.__doc__ @property def _filenames(self): return self._dataset._filenames # pylint: disable=protected-access @_filenames.setter def _filenames(self, value): self._dataset._filenames = value # pylint: disable=protected-access class _TFRecordDataset(dataset_ops.DatasetSource): """A `Dataset` comprising records from one or more TFRecord files.""" def __init__(self, filenames, compression_type=None, buffer_size=None): """Creates a `TFRecordDataset`. Args: filenames: A `tf.string` tensor containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar representing the number of bytes in the read buffer. 0 means no buffering. """ self._filenames = filenames self._compression_type = convert.optional_param_to_tensor( "compression_type", compression_type, argument_default="", argument_dtype=dtypes.string) self._buffer_size = convert.optional_param_to_tensor( "buffer_size", buffer_size, argument_default=_DEFAULT_READER_BUFFER_SIZE_BYTES) variant_tensor = gen_dataset_ops.tf_record_dataset( self._filenames, self._compression_type, self._buffer_size) super(_TFRecordDataset, self).__init__(variant_tensor) @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) class ParallelInterleaveDataset(dataset_ops.UnaryDataset): """A `Dataset` that maps a function over its input and flattens the result.""" def __init__(self, input_dataset, map_func, cycle_length, block_length, sloppy, buffer_output_elements, prefetch_input_elements): """See `tf.data.experimental.parallel_interleave()` for details.""" self._input_dataset = input_dataset self._map_func = dataset_ops.StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, dataset_ops.DatasetSpec): raise TypeError("`map_func` must return a `Dataset` object.") self._element_spec = self._map_func.output_structure._element_spec # pylint: disable=protected-access self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") self._sloppy = ops.convert_to_tensor( sloppy, dtype=dtypes.bool, name="sloppy") self._buffer_output_elements = convert.optional_param_to_tensor( "buffer_output_elements", buffer_output_elements, argument_default=2 * block_length) self._prefetch_input_elements = convert.optional_param_to_tensor( 
"prefetch_input_elements", prefetch_input_elements, argument_default=2 * cycle_length) variant_tensor = ged_ops.parallel_interleave_dataset( self._input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, self._cycle_length, self._block_length, self._sloppy, self._buffer_output_elements, self._prefetch_input_elements, f=self._map_func.function, **self._flat_structure) super(ParallelInterleaveDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._element_spec def _transformation_name(self): return "tf.data.experimental.parallel_interleave()" @tf_export("data.TFRecordDataset", v1=[]) class TFRecordDatasetV2(dataset_ops.DatasetV2): """A `Dataset` comprising records from one or more TFRecord files.""" def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None): """Creates a `TFRecordDataset` to read one or more TFRecord files. Args: filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or more filenames. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. buffer_size: (Optional.) A `tf.int64` scalar representing the number of bytes in the read buffer. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value 1-100 MBs. If `None`, a sensible default for both local and remote file systems is used. num_parallel_reads: (Optional.) A `tf.int64` scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If `None`, files will be read sequentially. Raises: TypeError: If any argument does not have the expected type. ValueError: If any argument does not have the expected shape. 
""" filenames = _create_or_validate_filenames_dataset(filenames) self._filenames = filenames self._compression_type = compression_type self._buffer_size = buffer_size self._num_parallel_reads = num_parallel_reads def creator_fn(filename): return _TFRecordDataset(filename, compression_type, buffer_size) self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads) variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access super(TFRecordDatasetV2, self).__init__(variant_tensor) def _clone(self, filenames=None, compression_type=None, buffer_size=None, num_parallel_reads=None): return TFRecordDatasetV2(filenames or self._filenames, compression_type or self._compression_type, buffer_size or self._buffer_size, num_parallel_reads or self._num_parallel_reads) def _inputs(self): return self._impl._inputs() # pylint: disable=protected-access @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) @tf_export(v1=["data.TFRecordDataset"]) class TFRecordDatasetV1(dataset_ops.DatasetV1Adapter): """A `Dataset` comprising records from one or more TFRecord files.""" def __init__(self, filenames, compression_type=None, buffer_size=None, num_parallel_reads=None): wrapped = TFRecordDatasetV2( filenames, compression_type, buffer_size, num_parallel_reads) super(TFRecordDatasetV1, self).__init__(wrapped) __init__.__doc__ = TFRecordDatasetV2.__init__.__doc__ def _clone(self, filenames=None, compression_type=None, buffer_size=None, num_parallel_reads=None): # pylint: disable=protected-access return TFRecordDatasetV1( filenames or self._dataset._filenames, compression_type or self._dataset._compression_type, buffer_size or self._dataset._buffer_size, num_parallel_reads or self._dataset._num_parallel_reads) @property def _filenames(self): return self._dataset._filenames # pylint: disable=protected-access @_filenames.setter def _filenames(self, value): self._dataset._filenames = value # pylint: disable=protected-access class _FixedLengthRecordDataset(dataset_ops.DatasetSource): """A `Dataset` of fixed-length records from one or more binary files.""" def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None): """Creates a `FixedLengthRecordDataset`. Args: filenames: A `tf.string` tensor containing one or more filenames. record_bytes: A `tf.int64` scalar representing the number of bytes in each record. header_bytes: (Optional.) A `tf.int64` scalar representing the number of bytes to skip at the start of a file. footer_bytes: (Optional.) A `tf.int64` scalar representing the number of bytes to ignore at the end of a file. buffer_size: (Optional.) A `tf.int64` scalar representing the number of bytes to buffer when reading. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. 
""" self._filenames = filenames self._record_bytes = ops.convert_to_tensor( record_bytes, dtype=dtypes.int64, name="record_bytes") self._header_bytes = convert.optional_param_to_tensor( "header_bytes", header_bytes) self._footer_bytes = convert.optional_param_to_tensor( "footer_bytes", footer_bytes) self._buffer_size = convert.optional_param_to_tensor( "buffer_size", buffer_size, _DEFAULT_READER_BUFFER_SIZE_BYTES) self._compression_type = convert.optional_param_to_tensor( "compression_type", compression_type, argument_default="", argument_dtype=dtypes.string) variant_tensor = gen_dataset_ops.fixed_length_record_dataset_v2( self._filenames, self._header_bytes, self._record_bytes, self._footer_bytes, self._buffer_size, self._compression_type) super(_FixedLengthRecordDataset, self).__init__(variant_tensor) @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) @tf_export("data.FixedLengthRecordDataset", v1=[]) class FixedLengthRecordDatasetV2(dataset_ops.DatasetSource): """A `Dataset` of fixed-length records from one or more binary files.""" def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, num_parallel_reads=None): """Creates a `FixedLengthRecordDataset`. Args: filenames: A `tf.string` tensor or `tf.data.Dataset` containing one or more filenames. record_bytes: A `tf.int64` scalar representing the number of bytes in each record. header_bytes: (Optional.) A `tf.int64` scalar representing the number of bytes to skip at the start of a file. footer_bytes: (Optional.) A `tf.int64` scalar representing the number of bytes to ignore at the end of a file. buffer_size: (Optional.) A `tf.int64` scalar representing the number of bytes to buffer when reading. compression_type: (Optional.) A `tf.string` scalar evaluating to one of `""` (no compression), `"ZLIB"`, or `"GZIP"`. num_parallel_reads: (Optional.) A `tf.int64` scalar representing the number of files to read in parallel. If greater than one, the records of files read in parallel are outputted in an interleaved order. If your input pipeline is I/O bottlenecked, consider setting this parameter to a value greater than one to parallelize the I/O. If `None`, files will be read sequentially. 
""" filenames = _create_or_validate_filenames_dataset(filenames) self._filenames = filenames self._record_bytes = record_bytes self._header_bytes = header_bytes self._footer_bytes = footer_bytes self._buffer_size = buffer_size self._compression_type = compression_type def creator_fn(filename): return _FixedLengthRecordDataset(filename, record_bytes, header_bytes, footer_bytes, buffer_size, compression_type) self._impl = _create_dataset_reader(creator_fn, filenames, num_parallel_reads) variant_tensor = self._impl._variant_tensor # pylint: disable=protected-access super(FixedLengthRecordDatasetV2, self).__init__(variant_tensor) @property def element_spec(self): return tensor_spec.TensorSpec([], dtypes.string) @tf_export(v1=["data.FixedLengthRecordDataset"]) class FixedLengthRecordDatasetV1(dataset_ops.DatasetV1Adapter): """A `Dataset` of fixed-length records from one or more binary files.""" def __init__(self, filenames, record_bytes, header_bytes=None, footer_bytes=None, buffer_size=None, compression_type=None, num_parallel_reads=None): wrapped = FixedLengthRecordDatasetV2( filenames, record_bytes, header_bytes, footer_bytes, buffer_size, compression_type, num_parallel_reads) super(FixedLengthRecordDatasetV1, self).__init__(wrapped) __init__.__doc__ = FixedLengthRecordDatasetV2.__init__.__doc__ @property def _filenames(self): return self._dataset._filenames # pylint: disable=protected-access @_filenames.setter def _filenames(self, value): self._dataset._filenames = value # pylint: disable=protected-access # TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep # these aliases in place. FixedLengthRecordDataset = FixedLengthRecordDatasetV1 TFRecordDataset = TFRecordDatasetV1 TextLineDataset = TextLineDatasetV1
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/ops/readers.py
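A minimal usage sketch for the reader classes above (the file names and the 4-byte-header/8-byte-record layout are invented for illustration; only `tf.compat.v1` APIs present in r1.15 are used):

```python
import tensorflow.compat.v1 as tf

# Hypothetical binary files: a 4-byte header, then fixed 8-byte records.
dataset = tf.data.FixedLengthRecordDataset(
    filenames=["/tmp/records_0.bin", "/tmp/records_1.bin"],
    record_bytes=8,
    header_bytes=4,
    num_parallel_reads=2)  # > 1 interleaves records across the two files

iterator = tf.data.make_one_shot_iterator(dataset)
next_record = iterator.get_next()  # a scalar tf.string of exactly 8 bytes

with tf.Session() as sess:
  try:
    while True:
      print(sess.run(next_record))
  except tf.errors.OutOfRangeError:
    pass
```

With `num_parallel_reads` left as `None`, the same pipeline reads the files sequentially instead.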
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import threading
import warnings

from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export


# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
GET_NEXT_CALL_WARNING_THRESHOLD = 32

GET_NEXT_CALL_WARNING_MESSAGE = (
    "An unusually high number of `Iterator.get_next()` calls was detected. "
    "This often indicates that `Iterator.get_next()` is being called inside "
    "a training loop, which will cause gradual slowdown and eventual resource "
    "exhaustion. If this is the case, restructure your code to call "
    "`next_element = iterator.get_next()` once outside the loop, and use "
    "`next_element` as the input to some computation that is invoked inside "
    "the loop.")

# Collection of all IteratorResources in the `Graph`.
GLOBAL_ITERATORS = "iterators"


def _device_stack_is_empty():
  if context.executing_eagerly():
    return context.context().device_name is None
  # pylint: disable=protected-access
  device_stack = ops.get_default_graph()._device_functions_outer_to_inner
  # pylint: enable=protected-access
  return not bool(device_stack)


@tf_export(v1=["data.Iterator"])
class Iterator(trackable.Trackable):
  """Represents the state of iterating through a `Dataset`."""

  def __init__(self, iterator_resource, initializer, output_types,
               output_shapes, output_classes):
    """Creates a new iterator from the given iterator resource.
    Note: Most users will not call this initializer directly, and will
    instead use `Dataset.make_initializable_iterator()` or
    `Dataset.make_one_shot_iterator()`.

    Args:
      iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
        iterator.
      initializer: A `tf.Operation` that should be run to initialize this
        iterator.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this iterator.
      output_shapes: A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this iterator.
      output_classes: A nested structure of Python `type` objects corresponding
        to each component of an element of this iterator.
    """
    self._iterator_resource = iterator_resource
    self._initializer = initializer

    if (output_types is None or output_shapes is None
        or output_classes is None):
      raise ValueError("If `structure` is not specified, all of "
                       "`output_types`, `output_shapes`, and `output_classes`"
                       " must be specified.")
    self._element_spec = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    self._flat_tensor_shapes = structure.get_flat_tensor_shapes(
        self._element_spec)
    self._flat_tensor_types = structure.get_flat_tensor_types(
        self._element_spec)

    self._string_handle = gen_dataset_ops.iterator_to_string_handle(
        self._iterator_resource)
    self._get_next_call_count = 0
    ops.add_to_collection(GLOBAL_ITERATORS, self._iterator_resource)

  @staticmethod
  def from_structure(output_types,
                     output_shapes=None,
                     shared_name=None,
                     output_classes=None):
    """Creates a new, uninitialized `Iterator` with the given structure.

    This iterator-constructing method can be used to create an iterator that
    is reusable with many different datasets.

    The returned iterator is not bound to a particular dataset, and it has
    no `initializer`. To initialize the iterator, run the operation returned by
    `Iterator.make_initializer(dataset)`.

    The following is an example:

    ```python
    iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))

    dataset_range = Dataset.range(10)
    range_initializer = iterator.make_initializer(dataset_range)

    dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
    evens_initializer = iterator.make_initializer(dataset_evens)

    # Define a model based on the iterator; in this example, the model_fn
    # is expected to take scalar tf.int64 Tensors as input (see
    # the definition of 'iterator' above).
    prediction, loss = model_fn(iterator.get_next())

    # Train for `num_epochs`, where for each epoch, we first iterate over
    # dataset_range, and then iterate over dataset_evens.
    for _ in range(num_epochs):
      # Initialize the iterator to `dataset_range`
      sess.run(range_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break

      # Initialize the iterator to `dataset_evens`
      sess.run(evens_initializer)
      while True:
        try:
          pred, loss_val = sess.run([prediction, loss])
        except tf.errors.OutOfRangeError:
          break
    ```

    Args:
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      shared_name: (Optional.) If non-empty, this iterator will be shared under
        the given name across multiple sessions that share the same devices
        (e.g. when using a remote server).
      output_classes: (Optional.)
        A nested structure of Python `type`
        objects corresponding to each component of an element of this
        iterator. If omitted, each component is assumed to be of type
        `tf.Tensor`.

    Returns:
      An `Iterator`.

    Raises:
      TypeError: If the structures of `output_shapes` and `output_types` are
        not the same.
    """
    output_types = nest.map_structure(dtypes.as_dtype, output_types)
    if output_shapes is None:
      output_shapes = nest.map_structure(
          lambda _: tensor_shape.TensorShape(None), output_types)
    else:
      output_shapes = nest.map_structure_up_to(output_types,
                                               tensor_shape.as_shape,
                                               output_shapes)
    if output_classes is None:
      output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
    nest.assert_same_structure(output_types, output_shapes)
    output_structure = structure.convert_legacy_structure(
        output_types, output_shapes, output_classes)
    if shared_name is None:
      shared_name = ""
    if _device_stack_is_empty():
      with ops.device("/cpu:0"):
        iterator_resource = gen_dataset_ops.iterator_v2(
            container="",
            shared_name=shared_name,
            output_types=structure.get_flat_tensor_types(output_structure),
            output_shapes=structure.get_flat_tensor_shapes(output_structure))
    else:
      iterator_resource = gen_dataset_ops.iterator_v2(
          container="",
          shared_name=shared_name,
          output_types=structure.get_flat_tensor_types(output_structure),
          output_shapes=structure.get_flat_tensor_shapes(output_structure))
    return Iterator(iterator_resource, None, output_types, output_shapes,
                    output_classes)

  @staticmethod
  def from_string_handle(string_handle,
                         output_types,
                         output_shapes=None,
                         output_classes=None):
    """Creates a new, uninitialized `Iterator` based on the given handle.

    This method allows you to define a "feedable" iterator where you can choose
    between concrete iterators by feeding a value in a `tf.Session.run` call.
    In that case, `string_handle` would be a `tf.compat.v1.placeholder`, and
    you would feed it with the value of `tf.data.Iterator.string_handle` in
    each step.

    For example, if you had two iterators that marked the current position in
    a training dataset and a test dataset, you could choose which to use in
    each step as follows:

    ```python
    train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    train_iterator_handle = sess.run(train_iterator.string_handle())

    test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
    test_iterator_handle = sess.run(test_iterator.string_handle())

    handle = tf.compat.v1.placeholder(tf.string, shape=[])
    iterator = tf.data.Iterator.from_string_handle(
        handle, train_iterator.output_types)

    next_element = iterator.get_next()
    loss = f(next_element)

    train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
    test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
    ```

    Args:
      string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates to
        a handle produced by the `Iterator.string_handle()` method.
      output_types: A nested structure of `tf.DType` objects corresponding to
        each component of an element of this dataset.
      output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
        corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
      output_classes: (Optional.) A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator. If
        omitted, each component is assumed to be of type `tf.Tensor`.

    Returns:
      An `Iterator`.
""" output_types = nest.map_structure(dtypes.as_dtype, output_types) if output_shapes is None: output_shapes = nest.map_structure( lambda _: tensor_shape.TensorShape(None), output_types) else: output_shapes = nest.map_structure_up_to(output_types, tensor_shape.as_shape, output_shapes) if output_classes is None: output_classes = nest.map_structure(lambda _: ops.Tensor, output_types) nest.assert_same_structure(output_types, output_shapes) output_structure = structure.convert_legacy_structure( output_types, output_shapes, output_classes) string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string) if _device_stack_is_empty(): with ops.device("/cpu:0"): iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2( string_handle, output_types=structure.get_flat_tensor_types(output_structure), output_shapes=structure.get_flat_tensor_shapes(output_structure)) else: iterator_resource = gen_dataset_ops.iterator_from_string_handle_v2( string_handle, output_types=structure.get_flat_tensor_types(output_structure), output_shapes=structure.get_flat_tensor_shapes(output_structure)) return Iterator(iterator_resource, None, output_types, output_shapes, output_classes) @property def initializer(self): """A `tf.Operation` that should be run to initialize this iterator. Returns: A `tf.Operation` that should be run to initialize this iterator Raises: ValueError: If this iterator initializes itself automatically. """ if self._initializer is not None: return self._initializer else: # TODO(mrry): Consider whether one-shot iterators should have # initializers that simply reset their state to the beginning. raise ValueError("Iterator does not have an initializer.") def make_initializer(self, dataset, name=None): """Returns a `tf.Operation` that initializes this iterator on `dataset`. Args: dataset: A `Dataset` with compatible structure to this iterator. name: (Optional.) A name for the created operation. Returns: A `tf.Operation` that can be run to initialize this iterator on the given `dataset`. Raises: TypeError: If `dataset` and this iterator do not have a compatible element structure. """ with ops.name_scope(name, "make_initializer") as name: # NOTE(mrry): Cannot depend on `dataset_ops.get_legacy_output*()` due # to that creating a circular dependency. # pylint: disable=protected-access dataset_output_types = nest.map_structure( lambda component_spec: component_spec._to_legacy_output_types(), dataset.element_spec) dataset_output_shapes = nest.map_structure( lambda component_spec: component_spec._to_legacy_output_shapes(), dataset.element_spec) dataset_output_classes = nest.map_structure( lambda component_spec: component_spec._to_legacy_output_classes(), dataset.element_spec) # pylint: enable=protected-access nest.assert_same_structure(self.output_types, dataset_output_types) nest.assert_same_structure(self.output_shapes, dataset_output_shapes) for iterator_class, dataset_class in zip( nest.flatten(self.output_classes), nest.flatten(dataset_output_classes)): if iterator_class is not dataset_class: raise TypeError( "Expected output classes %r but got dataset with output class %r." % (self.output_classes, dataset_output_classes)) for iterator_dtype, dataset_dtype in zip( nest.flatten(self.output_types), nest.flatten(dataset_output_types)): if iterator_dtype != dataset_dtype: raise TypeError( "Expected output types %r but got dataset with output types %r." 
              % (self.output_types, dataset_output_types))
      for iterator_shape, dataset_shape in zip(
          nest.flatten(self.output_shapes),
          nest.flatten(dataset_output_shapes)):
        if not iterator_shape.is_compatible_with(dataset_shape):
          raise TypeError("Expected output shapes compatible with %r but got "
                          "dataset with output shapes %r." %
                          (self.output_shapes, dataset_output_shapes))
    with ops.device(self._iterator_resource.device):
      # pylint: disable=protected-access
      return gen_dataset_ops.make_iterator(
          dataset._variant_tensor, self._iterator_resource, name=name)

  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s representing the next element.

    In graph mode, you should typically call this method *once* and use its
    result as the input to another computation. A typical loop will then call
    `tf.Session.run` on the result of that computation. The loop will terminate
    when the `Iterator.get_next()` operation raises
    `tf.errors.OutOfRangeError`. The following skeleton shows how to use this
    method when building a training loop:

    ```python
    dataset = ...  # A `tf.data.Dataset` object.
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()

    # Build a TensorFlow graph that does something with each element.
    loss = model_function(next_element)
    optimizer = ...  # A `tf.compat.v1.train.Optimizer` object.
    train_op = optimizer.minimize(loss)

    with tf.compat.v1.Session() as sess:
      try:
        while True:
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
        pass
    ```

    NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
    when you are distributing different elements to multiple devices in a
    single step. However, a common pitfall arises when users call
    `Iterator.get_next()` in each iteration of their training loop.
    `Iterator.get_next()` adds ops to the graph, and executing each op
    allocates resources (including threads); as a consequence, invoking it in
    every iteration of a training loop causes slowdown and eventual resource
    exhaustion. To guard against this outcome, we log a warning when the
    number of uses crosses a fixed threshold of suspiciousness.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

    with ops.device(self._iterator_resource.device):
      # pylint: disable=protected-access
      flat_ret = gen_dataset_ops.iterator_get_next(
          self._iterator_resource,
          output_types=self._flat_tensor_types,
          output_shapes=self._flat_tensor_shapes,
          name=name)
      return structure.from_tensor_list(self._element_spec, flat_ret)

  def get_next_as_optional(self):
    # pylint: disable=protected-access
    return optional_ops._OptionalImpl(
        gen_dataset_ops.iterator_get_next_as_optional(
            self._iterator_resource,
            output_types=structure.get_flat_tensor_types(self.element_spec),
            output_shapes=structure.get_flat_tensor_shapes(self.element_spec)),
        self.element_spec)

  def string_handle(self, name=None):
    """Returns a string-valued `tf.Tensor` that represents this iterator.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A scalar `tf.Tensor` of type `tf.string`.
    """
    if name is None:
      return self._string_handle
    else:
      return gen_dataset_ops.iterator_to_string_handle(
          self._iterator_resource, name=name)

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.
    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  def element_spec(self):
    """The type specification of an element of this iterator.

    Returns:
      A nested structure of `tf.TypeSpec` objects matching the structure of an
      element of this iterator and specifying the type of individual
      components.
    """
    return self._element_spec

  def _gather_saveables_for_checkpoint(self):

    def _saveable_factory(name):
      return _IteratorSaveable(self._iterator_resource, name)

    return {"ITERATOR": _saveable_factory}


_uid_counter = 0
_uid_lock = threading.Lock()


def _generate_shared_name(prefix):
  with _uid_lock:
    global _uid_counter
    uid = _uid_counter
    _uid_counter += 1
  return "{}{}".format(prefix, uid)


class IteratorResourceDeleter(object):
  """An object which cleans up an iterator resource handle.

  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectable.
  """

  def __init__(self, handle, device, deleter):
    self._deleter = deleter
    self._handle = handle
    self._device = device
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_iterator(
              handle=self._handle, deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_iterator(
              handle=self._handle, deleter=self._deleter)


class IteratorV2(trackable.Trackable, composite_tensor.CompositeTensor):
  """An iterator producing tf.Tensor objects from a tf.data.Dataset."""

  def __init__(self, dataset=None, components=None, element_spec=None):
    """Creates a new iterator from the given dataset.

    If `dataset` is not specified, the iterator will be created from the given
    tensor components and element structure. In particular, the alternative for
    constructing the iterator is used when the iterator is reconstructed from
    its `CompositeTensor` representation.

    Args:
      dataset: A `tf.data.Dataset` object.
      components: Tensor components to construct the iterator from.
      element_spec: A nested structure of `TypeSpec` objects that represents
        the type specification of elements of the iterator.

    Raises:
      ValueError: If `dataset` is not provided and either `components` or
        `element_spec` is not provided.
        Or, if `dataset` is provided, either `components` or `element_spec`
        is also provided.
    """
    error_message = ("Either `dataset` or both `components` and "
                     "`element_spec` need to be provided.")

    self._device = context.context().device_name
    if dataset is None:
      if (components is None or element_spec is None):
        raise ValueError(error_message)
      # pylint: disable=protected-access
      self._element_spec = element_spec
      self._flat_output_types = structure.get_flat_tensor_types(
          self._element_spec)
      self._flat_output_shapes = structure.get_flat_tensor_shapes(
          self._element_spec)
      self._iterator_resource, self._deleter = components
      # Delete the resource when this object is deleted
      self._resource_deleter = IteratorResourceDeleter(
          handle=self._iterator_resource,
          device=self._device,
          deleter=self._deleter)
    else:
      if (components is not None or element_spec is not None):
        raise ValueError(error_message)
      if (_device_stack_is_empty() or
          context.context().device_spec.device_type != "CPU"):
        with ops.device("/cpu:0"):
          self._create_iterator(dataset)
      else:
        self._create_iterator(dataset)

  def _create_iterator(self, dataset):
    # pylint: disable=protected-access
    dataset = dataset._apply_options()
    ds_variant = dataset._variant_tensor
    self._element_spec = dataset.element_spec
    self._flat_output_types = structure.get_flat_tensor_types(
        self._element_spec)
    self._flat_output_shapes = structure.get_flat_tensor_shapes(
        self._element_spec)
    with ops.device(ds_variant.device):
      self._iterator_resource, self._deleter = (
          gen_dataset_ops.anonymous_iterator_v2(
              output_types=self._flat_output_types,
              output_shapes=self._flat_output_shapes))
      gen_dataset_ops.make_iterator(ds_variant, self._iterator_resource)
      # Delete the resource when this object is deleted
      self._resource_deleter = IteratorResourceDeleter(
          handle=self._iterator_resource,
          device=self._device,
          deleter=self._deleter)

  def __iter__(self):
    return self

  def __next__(self):  # For Python 3 compatibility
    return self.next()

  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    if not context.executing_eagerly():
      with ops.device(self._device):
        ret = gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      return structure.from_compatible_tensor_list(self._element_spec, ret)

    # This runs in sync mode as iterators use an error status to communicate
    # that there is no more data to iterate over.
    # TODO(b/77291417): Fix
    with context.execution_mode(context.SYNC):
      with ops.device(self._device):
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
        # because in eager mode this code will run synchronously on the calling
        # thread. Therefore we do not need to make a defensive context switch
        # to a background thread, and can achieve a small constant performance
        # boost by invoking the iterator synchronously.
        ret = gen_dataset_ops.iterator_get_next_sync(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

      try:
        # Fast path for the case `self._structure` is not a nested structure.
        return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
      except AttributeError:
        return structure.from_compatible_tensor_list(self._element_spec, ret)

  @property
  def _type_spec(self):
    return IteratorSpec(self.element_spec)

  def next(self):
    """Returns a nested structure of `Tensor`s containing the next element."""
    try:
      return self._next_internal()
    except errors.OutOfRangeError:
      raise StopIteration

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_classes(iterator)`.")
  def output_classes(self):
    """Returns the class of each component of an element of this iterator.

    The expected values are `tf.Tensor` and `tf.SparseTensor`.

    Returns:
      A nested structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_shapes(iterator)`.")
  def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  @deprecation.deprecated(
      None, "Use `tf.compat.v1.data.get_output_types(iterator)`.")
  def output_types(self):
    """Returns the type of each component of an element of this iterator.

    Returns:
      A nested structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    """
    return nest.map_structure(
        lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
        self._element_spec)

  @property
  def element_spec(self):
    """The type specification of an element of this iterator.

    Returns:
      A nested structure of `tf.TypeSpec` objects matching the structure of an
      element of this iterator and specifying the type of individual
      components.
    """
    return self._element_spec

  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation. Currently unused.

    Returns:
      A nested structure of `tf.Tensor` objects.

    Raises:
      `tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
    """
    del name
    return self._next_internal()

  def _gather_saveables_for_checkpoint(self):

    def _saveable_factory(name):
      return _IteratorSaveable(self._iterator_resource, name)

    return {"ITERATOR": _saveable_factory}


# TODO(jsimsa): Export this as "tf.data.IteratorSpec".
class IteratorSpec(type_spec.TypeSpec):
  """Type specification for `tf.data.Iterator`."""

  __slots__ = ["_element_spec"]

  def __init__(self, element_spec):
    self._element_spec = element_spec

  @property
  def value_type(self):
    return IteratorV2

  def _serialize(self):
    return (self._element_spec,)

  @property
  def _component_specs(self):
    # The components are the iterator resource handle and its deleter, which
    # `anonymous_iterator_v2` produces as a scalar variant tensor.
    return (
        tensor_spec.TensorSpec([], dtypes.resource),
        tensor_spec.TensorSpec([], dtypes.variant),
    )

  def _to_components(self, value):
    return (value._iterator_resource, value._deleter)  # pylint: disable=protected-access

  def _from_components(self, components):
    return IteratorV2(
        dataset=None,
        components=components,
        element_spec=self._element_spec)

  @staticmethod
  def from_value(value):
    return IteratorSpec(value.element_spec)  # pylint: disable=protected-access


# TODO(b/71645805): Expose trackable stateful objects from dataset.
class _IteratorSaveable(BaseSaverBuilder.SaveableObject):
  """SaveableObject for saving/restoring iterator state."""

  def __init__(self, iterator_resource, name):
    serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource)
    specs = [
        BaseSaverBuilder.SaveSpec(serialized_iterator, "", name + "_STATE")
    ]
    super(_IteratorSaveable, self).__init__(iterator_resource, specs, name)

  def restore(self, restored_tensors, restored_shapes):
    with ops.colocate_with(self.op):
      return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])


@tf_export("data.experimental.get_next_as_optional")
def get_next_as_optional(iterator):
  """Returns an `Optional` that contains the next value from the iterator.

  If `iterator` has reached the end of the sequence, the returned `Optional`
  will have no value.

  Args:
    iterator: A `tf.compat.v1.data.Iterator` object.

  Returns:
    An `Optional` object representing the next value from the iterator (if it
    has one) or no value.
  """
  # pylint: disable=protected-access
  return optional_ops._OptionalImpl(
      gen_dataset_ops.iterator_get_next_as_optional(
          iterator._iterator_resource,
          output_types=structure.get_flat_tensor_types(iterator.element_spec),
          output_shapes=structure.get_flat_tensor_shapes(
              iterator.element_spec)),
      iterator.element_spec)
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/ops/iterator_ops.py
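The `Iterator.from_structure`/`make_initializer` pattern documented above condenses into the following sketch (the two datasets are arbitrary examples; graph mode, as the v1 API assumes):

```python
import tensorflow.compat.v1 as tf

dataset_a = tf.data.Dataset.range(5)
dataset_b = tf.data.Dataset.range(100, 103)

# One reusable iterator matching the common structure of both datasets.
iterator = tf.data.Iterator.from_structure(dataset_a.output_types,
                                           dataset_a.output_shapes)
next_element = iterator.get_next()  # built once, outside any loop

with tf.Session() as sess:
  for init_op in [iterator.make_initializer(dataset_a),
                  iterator.make_initializer(dataset_b)]:
    sess.run(init_op)  # re-point the same iterator at another dataset
    try:
      while True:
        print(sess.run(next_element))
    except tf.errors.OutOfRangeError:
      pass  # reached the end of the current dataset
```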
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops


class _PerDeviceGenerator(dataset_ops.DatasetV2):
  """A `dummy` generator dataset."""

  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
               source_device, element_spec):
    self._element_spec = element_spec

    multi_device_iterator_string_handle = (
        gen_dataset_ops.multi_device_iterator_to_string_handle(
            multi_device_iterator_resource))

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _init_func():
      return multi_device_iterator_string_handle

    init_func_concrete = _init_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(autograph=False)  # Pure graph code.
    def _remote_init_func():
      return functional_ops.remote_call(
          target=source_device,
          args=init_func_concrete.captured_inputs,
          Tout=[dtypes.string],
          f=init_func_concrete)

    self._init_func = _remote_init_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._init_captured_args = self._init_func.captured_inputs

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _next_func(string_handle):
      # pylint: disable=protected-access
      multi_device_iterator = (
          gen_dataset_ops.multi_device_iterator_from_string_handle(
              string_handle=string_handle,
              output_types=structure.get_flat_tensor_types(self._element_spec),
              output_shapes=structure.get_flat_tensor_shapes(
                  self._element_spec)))
      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
          multi_device_iterator=multi_device_iterator,
          shard_num=shard_num,
          incarnation_id=incarnation_id,
          output_types=structure.get_flat_tensor_types(self._element_spec),
          output_shapes=structure.get_flat_tensor_shapes(self._element_spec))

    next_func_concrete = _next_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun_with_attributes(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        attributes={"experimental_ints_on_device": True},
        autograph=False)  # Pure graph code.
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + next_func_concrete.captured_inputs,
          Tout=structure.get_flat_tensor_types(self._element_spec),
          f=next_func_concrete)

    self._next_func = _remote_next_func._get_concrete_function_internal()  # pylint: disable=protected-access
    self._next_captured_args = self._next_func.captured_inputs

    self._incarnation_id_index = -1
    for i, arg in enumerate(self._next_captured_args):
      if arg is incarnation_id:
        self._incarnation_id_index = i

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _finalize_func(unused_string_handle):
      return array_ops.constant(0, dtypes.int64)

    finalize_func_concrete = _finalize_func._get_concrete_function_internal()  # pylint: disable=protected-access

    # TODO(b/124254153): Enable autograph once the overhead is low enough.
    @function.defun(
        input_signature=[tensor_spec.TensorSpec([], dtypes.string)],
        autograph=False)  # Pure graph code.
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + finalize_func_concrete.captured_inputs,
          Tout=[dtypes.int64],
          f=finalize_func_concrete)

    self._finalize_func = _remote_finalize_func._get_concrete_function_internal(  # pylint: disable=protected-access
    )
    self._finalize_captured_args = self._finalize_func.captured_inputs

    variant_tensor = gen_dataset_ops.generator_dataset(
        self._init_captured_args,
        self._next_captured_args,
        self._finalize_captured_args,
        init_func=self._init_func,
        next_func=self._next_func,
        finalize_func=self._finalize_func,
        **self._flat_structure)
    super(_PerDeviceGenerator, self).__init__(variant_tensor)

  def _inputs(self):
    # TODO(b/116506223): Determine which datasets should be used as inputs here.
    return []

  @property
  def element_spec(self):
    return self._element_spec


class _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):
  """Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.

  Re-uses the functions from the provided per_device_dataset and just switches
  out the function argument corresponding to the incarnation_id.
""" def __init__(self, per_device_dataset, incarnation_id): # pylint: disable=protected-access self._element_spec = per_device_dataset.element_spec self._init_func = per_device_dataset._init_func self._init_captured_args = self._init_func.captured_inputs self._next_func = per_device_dataset._next_func self._next_captured_args = per_device_dataset._next_captured_args # The captured arguments to the next_func are string_handle, incarnation_id. # We update the incarnation id to the new one. self._next_captured_args[ per_device_dataset._incarnation_id_index] = incarnation_id self._finalize_func = per_device_dataset._finalize_func self._finalize_captured_args = per_device_dataset._finalize_captured_args variant_tensor = gen_dataset_ops.generator_dataset( self._init_captured_args, self._next_captured_args, self._finalize_captured_args, init_func=self._init_func, next_func=self._next_func, finalize_func=self._finalize_func, **self._flat_structure) super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor) def _inputs(self): # TODO(b/116506223): Determine which datasets should be used as inputs here. return [] @property def element_spec(self): return self._element_spec def _create_device_dataset(prototype_ds, incarnation_id, prefetch_buffer_size, experimental_slack): """Uses _prototype_device_datasets[i] to build a dataset for the device.""" ds = _ReincarnatedPerDeviceGenerator(prototype_ds, incarnation_id) if prefetch_buffer_size > 0: if experimental_slack: ds = dataset_ops.PrefetchDataset(ds, prefetch_buffer_size, slack_period=1) else: ds = ds.prefetch(prefetch_buffer_size) # TODO(jsimsa): Enable auto-tuning and optimizations when supported for # non-CPU devices. options = dataset_ops.Options() options.experimental_optimization.apply_default_optimizations = False options.experimental_optimization.autotune = False ds = ds.with_options(options) return ds class MultiDeviceIterator(object): """An iterator over multiple devices.""" def __init__(self, dataset, devices, max_buffer_size=1, prefetch_buffer_size=1, source_device="/cpu:0"): """Constructs a MultiDeviceIterator. Args: dataset: The input dataset to be iterated over. devices: The list of devices to fetch data to. max_buffer_size: Maximum size of the host side per device buffer to keep. prefetch_buffer_size: if > 1, then we setup a buffer on each device to prefetch into. source_device: The host device to place the `dataset` on. In order to prevent deadlocks, if the prefetch_buffer_size is greater than the max_buffer_size, we set the max_buffer_size to prefetch_buffer_size. """ options = dataset_ops.Options() options.experimental_distribute.num_devices = len(devices) dataset = dataset.with_options(options) self._dataset = dataset._apply_options() # pylint: disable=protected-access self._experimental_slack = dataset.options().experimental_slack self._devices = devices self._source_device = source_device self._source_device_tensor = ops.convert_to_tensor(source_device) self._max_buffer_size = max_buffer_size self._prefetch_buffer_size = prefetch_buffer_size if self._prefetch_buffer_size > self._max_buffer_size: self._max_buffer_size = self._prefetch_buffer_size # Create the MultiDeviceIterator. with ops.device(self._source_device): # TODO(b/121378567): Get rid of this shared_name hack. 
shared_name = "" if context.executing_eagerly(): shared_name = context.shared_name() self._multi_device_iterator_resource = ( gen_dataset_ops.multi_device_iterator( devices=self._devices, shared_name=shared_name, container="", **self._dataset._flat_structure)) # pylint: disable=protected-access if context.executing_eagerly(): # Delete the resource when this object is deleted self._resource_deleter = resource_variable_ops.EagerResourceDeleter( handle=self._multi_device_iterator_resource, handle_device=self._source_device) # The incarnation ID is used to ensure consistency between the per-device # iterators and the multi-device iterator. self._incarnation_id = gen_dataset_ops.multi_device_iterator_init( self._dataset._variant_tensor, # pylint: disable=protected-access self._multi_device_iterator_resource, max_buffer_size=self._max_buffer_size) self._prototype_device_datasets = [] for i, device in enumerate(self._devices): with ops.device(device): ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource, self._incarnation_id, self._source_device_tensor, self._dataset.element_spec) self._prototype_device_datasets.append(ds) # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to # initialize the device side of the pipeline. This would allow the # MultiDeviceIterator to choose, for example, to move some transformations # into the device side from its input. It might be useful in rewriting. # Create the per device iterators. self._device_iterators = [] for i, device in enumerate(self._devices): with ops.device(device): ds = _create_device_dataset(self._prototype_device_datasets[i], self._incarnation_id, self._prefetch_buffer_size, self._experimental_slack) if context.executing_eagerly(): self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds)) else: self._device_iterators.append( dataset_ops.make_initializable_iterator( ds, force_deactivate_gpu_prefetching=True)) if not context.executing_eagerly(): device_iterator_initializers = [ iterator.initializer for iterator in self._device_iterators ] self._initializer = control_flow_ops.group(*device_iterator_initializers) def _create_device_dataset(self, i): """Uses _prototype_device_datasets[i] to build a dataset for the device.""" ds = self._prototype_device_datasets[i] ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id) if self._prefetch_buffer_size > 0: if self._experimental_slack: ds = dataset_ops.PrefetchDataset( ds, self._prefetch_buffer_size, slack_period=1) else: ds = ds.prefetch(self._prefetch_buffer_size) # TODO(jsimsa): Enable auto-tuning and optimizations when supported for # non-CPU devices. 
    options = dataset_ops.Options()
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_optimization.autotune = False
    ds = ds.with_options(options)
    return ds

  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()

    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result

  def get_next_as_optional(self):
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(
            iterator_ops.get_next_as_optional(self._device_iterators[i]))
    return result

  @property
  def initializer(self):
    if context.executing_eagerly():
      return control_flow_ops.no_op()
    return self._initializer

  def _eager_reset(self):
    """Resets the MultiDeviceIterator in eager mode."""
    if not ops.executing_eagerly_outside_functions():
      raise ValueError("Eager reset is only supported in eager mode.")
    # pylint: disable=protected-access
    self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
        self._dataset._variant_tensor,
        self._multi_device_iterator_resource,
        max_buffer_size=self._max_buffer_size)
    for i, device in enumerate(self._devices):
      with ops.device(device):
        ds = _create_device_dataset(self._prototype_device_datasets[i],
                                    self._incarnation_id,
                                    self._prefetch_buffer_size,
                                    self._experimental_slack)
        # Reset the device iterator resources with the new dataset.
        ds_variant = ds._variant_tensor
        gen_dataset_ops.make_iterator(
            ds_variant, self._device_iterators[i]._iterator_resource)

  @property
  def element_spec(self):
    return self._dataset.element_spec


class MultiDeviceIteratorResourceDeleter(object):
  """An object which cleans up a Multi Device Iterator resource.

  An alternative to defining a __del__ method on an object. Even if the parent
  object is part of a reference cycle, the cycle will be collectible.
  """

  def __init__(self, multi_device_iterator, iterators, device, deleter):
    self._deleter = deleter
    self._multi_device_iterator = multi_device_iterator
    self._iterators = iterators
    self._device = device
    self._eager_mode = context.executing_eagerly()

  def __del__(self):
    with ops.device(self._device):
      # Make sure the resource is deleted in the same mode as it was created in.
      # We pass in the iterator handles as inputs to the op to make sure that
      # this op runs after all the iterators are deleted.
      if self._eager_mode:
        with context.eager_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)
      else:
        with context.graph_mode():
          gen_dataset_ops.delete_multi_device_iterator(
              multi_device_iterator=self._multi_device_iterator,
              iterators=self._iterators,
              deleter=self._deleter)


class MultiDeviceIteratorSpec(type_spec.TypeSpec):
  """Type specification for `MultiDeviceIteratorV2`."""

  __slots__ = ["_devices", "_source_device", "_element_spec"]

  def __init__(self, devices, source_device, element_spec):
    self._devices = devices
    self._source_device = source_device
    self._element_spec = element_spec

  @property
  def value_type(self):
    return MultiDeviceIteratorV2

  def _serialize(self):
    return (tuple(self._devices), self._source_device, self._element_spec)

  @property
  def _component_specs(self):
    # The first two components are the multi-device iterator resource handle
    # and its deleter, which `anonymous_multi_device_iterator` produces as a
    # scalar variant tensor.
    specs = [
        tensor_spec.TensorSpec([], dtypes.resource),
        tensor_spec.TensorSpec([], dtypes.variant)
    ]
    for _ in range(len(self._devices)):
      specs.append(iterator_ops.IteratorSpec(self._element_spec))
    return specs

  def _to_components(self, value):
    # pylint: disable=protected-access
    c = [value._multi_device_iterator_resource, value._deleter]
    c.extend(value._device_iterators)
    return c

  def _from_components(self, components):
    return MultiDeviceIteratorV2(
        dataset=None,
        devices=self._devices,
        source_device=self._source_device,
        components=components,
        element_spec=self._element_spec)

  @staticmethod
  def from_value(value):
    # pylint: disable=protected-access
    return MultiDeviceIteratorSpec(value._devices, value._source_device,
                                   value.element_spec)


class MultiDeviceIteratorV2(composite_tensor.CompositeTensor):
  """An iterator over multiple devices."""

  def __init__(self,
               dataset=None,
               devices=None,
               max_buffer_size=1,
               prefetch_buffer_size=1,
               source_device="/cpu:0",
               components=None,
               element_spec=None):
    """Constructs a MultiDeviceIteratorV2 object.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 0, then we set up a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on. In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
      components: Tensor components to construct the MultiDeviceIterator from.
      element_spec: A nested structure of `TypeSpec` objects that represents
        the type specification of elements of the iterator.

    Raises:
      RuntimeError: If executed in graph mode or outside of function building
        mode.
    """
    if (not context.executing_eagerly() and
        not ops.get_default_graph()._building_function):  # pylint: disable=protected-access
      raise RuntimeError("MultiDeviceIteratorV2 is only supported inside of "
                         "tf.function or when eager execution is enabled.")
    if devices is None:
      raise ValueError("`devices` must be provided")
    error_message = ("Either `dataset` or both `components` and "
                     "`element_spec` need to be provided.")
    if dataset is None:
      if (components is None or element_spec is None):
        raise ValueError(error_message)
      self._element_spec = element_spec
      self._devices = devices
      self._source_device = source_device
      self._multi_device_iterator_resource = components[0]
      self._deleter = components[1]
      self._device_iterators = components[2:]
      iterator_handles = []
      for it in self._device_iterators:
        iterator_handles.append(it._iterator_resource)  # pylint: disable=protected-access
    else:
      if (components is not None or element_spec is not None):
        raise ValueError(error_message)
      options = dataset_ops.Options()
      options.experimental_distribute.num_devices = len(devices)
      dataset = dataset.with_options(options)
      dataset = dataset._apply_options()  # pylint: disable=protected-access
      self._element_spec = dataset.element_spec
      experimental_slack = dataset.options().experimental_slack
      self._devices = devices
      self._source_device = source_device
      source_device_tensor = ops.convert_to_tensor(self._source_device)

      if prefetch_buffer_size > max_buffer_size:
        max_buffer_size = prefetch_buffer_size

      # Create the MultiDeviceIterator.
      with ops.device(self._source_device):
        self._multi_device_iterator_resource, self._deleter = (
            gen_dataset_ops.anonymous_multi_device_iterator(
                devices=self._devices, **dataset._flat_structure))  # pylint: disable=protected-access

        # The incarnation ID is used to ensure consistency between the
        # per-device iterators and the multi-device iterator.
        incarnation_id = gen_dataset_ops.multi_device_iterator_init(
            dataset._variant_tensor,  # pylint: disable=protected-access
            self._multi_device_iterator_resource,
            max_buffer_size=max_buffer_size)

      prototype_device_datasets = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,
                                   incarnation_id, source_device_tensor,
                                   dataset.element_spec)
          prototype_device_datasets.append(ds)

      # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
      # initialize the device side of the pipeline. This would allow the
      # MultiDeviceIterator to choose, for example, to move some
      # transformations into the device side from its input. It might be
      # useful in rewriting.
      # Create the per device iterators.
      self._device_iterators = []
      iterator_handles = []
      for i, device in enumerate(self._devices):
        with ops.device(device):
          ds = _create_device_dataset(prototype_device_datasets[i],
                                      incarnation_id, prefetch_buffer_size,
                                      experimental_slack)
          iterator = iter(ds)
          self._device_iterators.append(iterator)
          iterator_handles.append(iterator._iterator_resource)  # pylint: disable=protected-access

    self._resource_deleter = MultiDeviceIteratorResourceDeleter(
        multi_device_iterator=self._multi_device_iterator_resource,
        iterators=iterator_handles,
        device=self._source_device,
        deleter=self._deleter)

  def get_next(self, device=None):
    """Returns the next element given a `device`, else returns all in a list."""
    if device is not None:
      index = self._devices.index(device)
      return self._device_iterators[index].get_next()

    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(self._device_iterators[i].get_next())
    return result

  def __iter__(self):
    return self

  def __next__(self):
    return self.next()

  def next(self):
    try:
      return self.get_next()
    except errors.OutOfRangeError:
      raise StopIteration

  def get_next_as_optional(self):
    result = []
    for i, device in enumerate(self._devices):
      with ops.device(device):
        result.append(
            iterator_ops.get_next_as_optional(self._device_iterators[i]))
    return result

  @property
  def element_spec(self):
    return self._element_spec

  @property
  def _type_spec(self):
    return MultiDeviceIteratorSpec(self._devices, self._source_device,
                                   self._element_spec)
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/ops/multi_device_iterator_ops.py
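A sketch of driving the `MultiDeviceIterator` defined above from eager mode. Note that `multi_device_iterator_ops` is an internal module, and the duplicated `/cpu:0` device list is an assumption chosen so the example can run without GPUs; treat this as illustrative rather than a supported recipe:

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.data.ops import multi_device_iterator_ops

tf.enable_eager_execution()

dataset = tf.data.Dataset.range(8)
mdi = multi_device_iterator_ops.MultiDeviceIterator(
    dataset, devices=["/cpu:0", "/cpu:0"], prefetch_buffer_size=2)

for _ in range(4):
  # One element per device; elements are distributed round-robin, so this
  # should print 0 1, then 2 3, and so on.
  elem_a, elem_b = mdi.get_next()
  print(elem_a.numpy(), elem_b.numpy())
```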
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An Optional type for representing potentially missing values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc

import six

from tensorflow.python.data.util import structure
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.util.tf_export import tf_export


@tf_export("data.experimental.Optional")
@six.add_metaclass(abc.ABCMeta)
class Optional(composite_tensor.CompositeTensor):
  """Wraps a value that may/may not be present at runtime.

  An `Optional` can represent the result of an operation that may fail as a
  value, rather than raising an exception and halting execution. For example,
  `tf.data.experimental.get_next_as_optional` returns an `Optional` that either
  contains the next value from a `tf.compat.v1.data.Iterator` if one exists, or
  a "none" value that indicates the end of the sequence has been reached.

  An `Optional` can only be used with values that are convertible to `Tensor`
  or `CompositeTensor`.
  """

  @abc.abstractmethod
  def has_value(self, name=None):
    """Returns a tensor that evaluates to `True` if this optional has a value.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A scalar `tf.Tensor` of type `tf.bool`.
    """
    raise NotImplementedError("Optional.has_value()")

  @abc.abstractmethod
  def get_value(self, name=None):
    """Returns the value wrapped by this optional.

    If this optional does not have a value (i.e. `self.has_value()` evaluates
    to `False`), this operation will raise `tf.errors.InvalidArgumentError`
    at runtime.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      The wrapped value.
    """
    raise NotImplementedError("Optional.get_value()")

  @abc.abstractproperty
  def value_structure(self):
    """The structure of the components of this optional.

    Returns:
      A `Structure` object representing the structure of the components of this
      optional.
    """
    raise NotImplementedError("Optional.value_structure")

  @staticmethod
  def from_value(value):
    """Returns an `Optional` that wraps the given value.

    Args:
      value: A value to wrap. The value must be convertible to `Tensor` or
        `CompositeTensor`.

    Returns:
      An `Optional` that wraps `value`.
    """
    with ops.name_scope("optional") as scope:
      with ops.name_scope("value"):
        value_structure = structure.type_spec_from_value(value)
        encoded_value = structure.to_tensor_list(value_structure, value)

    return _OptionalImpl(
        gen_dataset_ops.optional_from_value(encoded_value, name=scope),
        value_structure)

  @staticmethod
  def none_from_structure(value_structure):
    """Returns an `Optional` that has no value.
    NOTE: This method takes an argument that defines the structure of the value
    that would be contained in the returned `Optional` if it had a value.

    Args:
      value_structure: A `Structure` object representing the structure of the
        components of this optional.

    Returns:
      An `Optional` that has no value.
    """
    return _OptionalImpl(gen_dataset_ops.optional_none(), value_structure)


class _OptionalImpl(Optional):
  """Concrete implementation of `tf.data.experimental.Optional`.

  NOTE(mrry): This implementation is kept private, to avoid defining
  `Optional.__init__()` in the public API.
  """

  def __init__(self, variant_tensor, value_structure):
    self._variant_tensor = variant_tensor
    self._value_structure = value_structure

  def has_value(self, name=None):
    return gen_dataset_ops.optional_has_value(self._variant_tensor, name=name)

  def get_value(self, name=None):
    # TODO(b/110122868): Consolidate the restructuring logic with similar logic
    # in `Iterator.get_next()` and `StructuredFunctionWrapper`.
    with ops.name_scope(name, "OptionalGetValue",
                        [self._variant_tensor]) as scope:
      return structure.from_tensor_list(
          self._value_structure,
          gen_dataset_ops.optional_get_value(
              self._variant_tensor,
              name=scope,
              output_types=structure.get_flat_tensor_types(
                  self._value_structure),
              output_shapes=structure.get_flat_tensor_shapes(
                  self._value_structure)))

  @property
  def value_structure(self):
    return self._value_structure

  @property
  def _type_spec(self):
    return OptionalSpec.from_value(self)


@tf_export(
    "OptionalSpec", v1=["OptionalSpec", "data.experimental.OptionalStructure"])
class OptionalSpec(type_spec.TypeSpec):
  """Represents an optional potentially containing a structured value."""

  __slots__ = ["_value_structure"]

  def __init__(self, value_structure):
    self._value_structure = value_structure

  @property
  def value_type(self):
    return _OptionalImpl

  def _serialize(self):
    return (self._value_structure,)

  @property
  def _component_specs(self):
    return [tensor_spec.TensorSpec((), dtypes.variant)]

  def _to_components(self, value):
    return [value._variant_tensor]  # pylint: disable=protected-access

  def _from_components(self, flat_value):
    # pylint: disable=protected-access
    return _OptionalImpl(flat_value[0], self._value_structure)

  @staticmethod
  def from_value(value):
    return OptionalSpec(value.value_structure)

  def _to_legacy_output_types(self):
    return self

  def _to_legacy_output_shapes(self):
    return self

  def _to_legacy_output_classes(self):
    return self
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/ops/optional_ops.py
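A brief sketch of consuming an iterator through `Optional` values rather than catching `OutOfRangeError`, using the public `tf.data.experimental.get_next_as_optional` wrapper from iterator_ops.py above (eager mode assumed):

```python
import tensorflow.compat.v1 as tf

tf.enable_eager_execution()

iterator = tf.data.make_one_shot_iterator(tf.data.Dataset.range(3))

while True:
  opt = tf.data.experimental.get_next_as_optional(iterator)
  if not opt.has_value():  # scalar tf.bool; becomes False at end of sequence
    break
  print(opt.get_value().numpy())  # prints 0, 1, 2
```

`Optional.from_value(...)` builds the same wrapper around a concrete value, and `Optional.none_from_structure(...)` builds the empty case for a given structure.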
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import enum
import functools
import os
import threading
import warnings
import weakref

import numpy as np
import six
from six.moves import queue as Queue  # pylint: disable=redefined-builtin

from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export

# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
    "wrap_function", globals(),
    "tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
    "autograph_ctx", globals(),
    "tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
    "autograph", globals(),
    "tensorflow.python.autograph.impl.api")

ops.NotDifferentiable("ReduceDataset")

# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")


class AutotuneAlgorithm(enum.Enum):
  HILL_CLIMB = 0
  GRADIENT_DESCENT = 1


@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
  """Represents a potentially large set of elements.

  A `Dataset` can be used to represent an input pipeline as a collection of
  elements and a "logical plan" of transformations that act on those elements.

  A dataset contains elements that each have the same (nested) structure and
  the individual components of the structure can be of any type representable
  by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`,
  `tf.SparseTensor`, `tf.RaggedTensor`, or `tf.TensorArray`.

  Example elements:

  ```python
  # Integer element
  a = 1
  # Float element
  b = 2.0
  # Tuple element with 2 components
  c = (1, 2)
  # Dict element with 3 components
  d = {"a": (2, 2), "b": 3}
  # Element containing a dataset
  e = tf.data.Dataset.from_tensors(10)
  ```
  """

  def __init__(self, variant_tensor):
    """Creates a DatasetV2 object.

    This is a difference between DatasetV1 and DatasetV2: DatasetV1 does not
    take anything in its constructor, whereas DatasetV2 expects subclasses to
    create a variant_tensor and pass it in to the super() call.

    Args:
      variant_tensor: A DT_VARIANT tensor that represents the dataset.
    """
    self._variant_tensor_attr = variant_tensor
    weak_self = weakref.proxy(self)
    self._variant_tracker = self._track_trackable(
        _VariantTracker(
            self._variant_tensor,
            # _trace_variant_creation only works when executing eagerly, so we
            # don't want to run it immediately. We also want the
            # _VariantTracker to have a weak reference to the Dataset to avoid
            # creating reference cycles and making work for the garbage
            # collector.
            lambda: weak_self._trace_variant_creation()()),  # pylint: disable=unnecessary-lambda,protected-access
        name="_variant_tracker")
    self._graph_attr = ops.get_default_graph()

  @property
  def _variant_tensor(self):
    return self._variant_tensor_attr

  @_variant_tensor.setter
  def _variant_tensor(self, _):
    raise ValueError("The _variant_tensor property is read-only")

  def _as_serialized_graph(self, stateful_whitelist=None):
    """Produces serialized graph representation of the dataset.

    Args:
      stateful_whitelist: Comma separated list of ops whose stateful attribute
        should be ignored during serialization.

    Returns:
      A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
      serialized graph.
    """
    if compat.forward_compatible(2019, 9, 10) or stateful_whitelist:
      return gen_dataset_ops.dataset_to_graph(self._variant_tensor,
                                              stateful_whitelist)
    else:
      return gen_dataset_ops.dataset_to_graph(self._variant_tensor)

  def _trace_variant_creation(self):
    """Traces a function which outputs a variant `tf.Tensor` for this dataset.

    Note that creating this function involves evaluating an op, and is
    currently only supported when executing eagerly.

    Returns:
      A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
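    For example, a minimal sketch of the intended usage (this is a private
    API, and the sketch assumes eager execution):

    ```python
    ds = Dataset.range(3)
    concrete_fn = ds._trace_variant_creation()  # pylint: disable=protected-access
    variant = concrete_fn()  # A scalar variant tensor representing `ds`.
    ```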
""" variant = self._variant_tensor if not isinstance(variant, ops.EagerTensor): raise NotImplementedError( "Can only export Datasets which were created executing eagerly. " "Please file a feature request if this is important to you.") with context.eager_mode(), ops.device("CPU"): graph_def = graph_pb2.GraphDef().FromString( self._as_serialized_graph().numpy()) # pylint: disable=protected-access output_node_name = None for node in graph_def.node: if node.op == "_Retval": if output_node_name is not None: raise AssertionError( "Found multiple return values from the dataset's graph, expected " "only one.") output_node_name, = node.input if output_node_name is None: raise AssertionError("Could not find the dataset's output node.") # Add functions used in this Dataset to the function's graph, since they # need to follow it around (and for example be added to a SavedModel which # references the dataset). variant_function = wrap_function.function_from_graph_def( graph_def, inputs=[], outputs=output_node_name + ":0") for used_function in self._functions(): used_function.function.add_to_graph(variant_function.graph) return variant_function @abc.abstractmethod def _inputs(self): """Returns a list of the input datasets of the dataset.""" raise NotImplementedError("Dataset._inputs") @property def _graph(self): return self._graph_attr @_graph.setter def _graph(self, _): raise ValueError("The _graph property is read-only") def _has_captured_ref(self): """Whether this dataset uses a function that captures ref variables. Returns: A boolean, which if true indicates that the dataset or one of its inputs uses a function that captures ref variables. """ if context.executing_eagerly(): # RefVariables are not supported in eager mode return False def is_tensor_or_parent_ref(tensor): if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access return True return any([is_tensor_or_parent_ref(x) for x in tensor.op.inputs]) for fn in self._functions(): if any([is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs]): return True return any( [input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access # TODO(jsimsa): Change this to be the transitive closure of functions used # by this dataset and its inputs. def _functions(self): """Returns a list of functions associated with this dataset. Returns: A list of `StructuredFunctionWrapper` objects. """ return [] def options(self): """Returns the options for this dataset and its inputs. Returns: A `tf.data.Options` object representing the dataset options. 
""" options = Options() for input_dataset in self._inputs(): input_options = input_dataset.options() if input_options is not None: options = options.merge(input_options) return options def _apply_options(self): """Apply options, such as optimization configuration, to the dataset.""" dataset = self options = self.options() if options.experimental_threading is not None: t_options = options.experimental_threading if t_options.max_intra_op_parallelism is not None: dataset = _MaxIntraOpParallelismDataset( dataset, t_options.max_intra_op_parallelism) if t_options.private_threadpool_size is not None: dataset = _PrivateThreadPoolDataset(dataset, t_options.private_threadpool_size) # pylint: disable=protected-access static_optimizations = options._static_optimizations() static_optimization_configs = options._static_optimization_configs() # pylint: enable=protected-access if static_optimizations: if self._has_captured_ref(): warnings.warn( "tf.data static optimizations are not compatible with tf.Variable. " "The following optimizations will be disabled: %s. To enable " "optimizations, use resource variables instead by calling " "`tf.enable_resource_variables()` at the start of the program." % ", ".join(static_optimizations)) else: dataset = _OptimizeDataset(dataset, static_optimizations, static_optimization_configs) autotune = True algorithm = AutotuneAlgorithm.HILL_CLIMB cpu_budget = 0 # Indicates that all CPU cores should be used. if options.experimental_optimization is not None: if options.experimental_optimization.autotune is False: # pylint: disable=g-bool-id-comparison autotune = False if options.experimental_optimization.autotune_algorithm is not None: algorithm = options.experimental_optimization.autotune_algorithm if options.experimental_optimization.autotune_cpu_budget is not None: cpu_budget = options.experimental_optimization.autotune_cpu_budget if autotune: dataset = _ModelDataset(dataset, algorithm, cpu_budget) if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access dataset, options.experimental_stats.aggregator, options.experimental_stats.prefix, options.experimental_stats.counter_prefix) # TODO: DEKHTIARJonathan - Re-enable once stable. # if os.environ.get("TF_ENABLE_AUTOMATIC_GPU_PREFETCHING", "0") == "1": # from tensorflow.python.distribute import distribution_strategy_context # if not distribution_strategy_context.has_strategy(): # if (options.experimental_optimization.prefetch_to_device is not None and # os.environ.get("TF_DISABLE_AUTOMATIC_GPU_PREFETCHING", "0") == "0"): # from tensorflow.python.data.experimental.ops import prefetching_ops # prefetch_device = options.experimental_optimization.prefetch_to_device # # def prefetch_to_device(target_device, buffer_size=None): # """A transformation that copies dataset elements to the given `target_device`. # # Args: # target_device: The name of a device to which elements will be copied. # buffer_size: (Optional.) The number of elements to buffer on `device`. # Defaults to an automatically chosen value. # # Returns: # A `Dataset` transformation function, which can be passed to # `tf.data.Dataset.apply`. 
# """ # # def _apply_fn(dataset): # source_device = dataset._variant_tensor.device # if source_device == "": # source_device = "/cpu:0" # return prefetching_ops._CopyToDeviceDataset( # dataset, # target_device=target_device, # source_device=source_device # ).prefetch(buffer_size) # # return _apply_fn # # dataset = dataset.apply( # prefetch_to_device(prefetch_device, buffer_size=1)) # else: # logging.warning("GPU prefetching has been deactivated in your " # "`tf.data` pipeline. It is not compatible with " # "`tf.distribute` API. Please transition to `horovod` for " # "maximum performance.") return dataset def __iter__(self): """Creates an `Iterator` for enumerating the elements of this dataset. The returned iterator implements the Python iterator protocol and therefore can only be used in eager mode. Returns: An `Iterator` over the elements of this dataset. Raises: RuntimeError: If not inside of tf.function and not executing eagerly. """ if context.executing_eagerly() or ops.inside_function(): with ops.device(self._variant_tensor.device): return iterator_ops.IteratorV2(self) else: raise RuntimeError("__iter__() is only supported inside of tf.function " "or when eager execution is enabled.") @abc.abstractproperty def element_spec(self): """The type specification of an element of this dataset. Returns: A nested structure of `tf.TypeSpec` objects matching the structure of an element of this dataset and specifying the type of individual components. """ raise NotImplementedError("Dataset.element_spec") def __repr__(self): output_shapes = nest.map_structure(str, get_legacy_output_shapes(self)) output_shapes = str(output_shapes).replace("'", "") output_types = nest.map_structure(repr, get_legacy_output_types(self)) output_types = str(output_types).replace("'", "") return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes, output_types)) @property def _flat_shapes(self): """Returns a list `tf.TensorShapes`s for the element tensor representation. Returns: A list `tf.TensorShapes`s for the element tensor representation. """ return structure.get_flat_tensor_shapes(self.element_spec) @property def _flat_types(self): """Returns a list `tf.DType`s for the element tensor representation. Returns: A list `tf.DType`s for the element tensor representation. """ return structure.get_flat_tensor_types(self.element_spec) @property def _flat_structure(self): """Helper for setting `output_shapes` and `output_types` attrs of an op. Most dataset op constructors expect `output_shapes` and `output_types` arguments that represent the flattened structure of an element. This helper function generates these attrs as a keyword argument dictionary, allowing `Dataset._variant_tensor` implementations to pass `**self._flat_structure` to the op constructor. Returns: A dictionary of keyword arguments that can be passed to a dataset op constructor. """ return { "output_shapes": self._flat_shapes, "output_types": self._flat_types, } @property def _type_spec(self): return DatasetSpec(self.element_spec) @staticmethod def from_tensors(tensors): """Creates a `Dataset` with a single element, comprising the given tensors. Note that if `tensors` contains a NumPy array, and eager execution is not enabled, the values will be embedded in the graph as one or more `tf.constant` operations. For large datasets (> 1 GB), this can waste memory and run into byte limits of graph serialization. 
    If `tensors` contains one or more large NumPy arrays, consider the
    alternative described in [this guide](
    https://tensorflow.org/guide/datasets#consuming_numpy_arrays).

    Args:
      tensors: A dataset element.

    Returns:
      Dataset: A `Dataset`.
    """
    return TensorDataset(tensors)

  @staticmethod
  def from_tensor_slices(tensors):
    """Creates a `Dataset` whose elements are slices of the given tensors.

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative
    described in [this guide](
    https://tensorflow.org/guide/datasets#consuming_numpy_arrays).

    Args:
      tensors: A dataset element, with each component having the same size in
        the 0th dimension.

    Returns:
      Dataset: A `Dataset`.
    """
    return TensorSliceDataset(tensors)

  class _GeneratorState(object):
    """Stores outstanding iterators created from a Python generator.

    This class keeps track of potentially multiple iterators that may have
    been created from a generator, e.g. in the case that the dataset is
    repeated, or nested within a parallel computation.
    """

    def __init__(self, generator):
      self._generator = generator
      self._lock = threading.Lock()
      self._next_id = 0  # GUARDED_BY(self._lock)
      self._args = {}
      self._iterators = {}

    def get_next_id(self, *args):
      with self._lock:
        ret = self._next_id
        self._next_id += 1
      self._args[ret] = args
      # NOTE(mrry): Explicitly create an array of `np.int64` because implicit
      # casting in `py_func()` will create an array of `np.int32` on Windows,
      # leading to a runtime error.
      return np.array(ret, dtype=np.int64)

    def get_iterator(self, iterator_id):
      try:
        return self._iterators[iterator_id]
      except KeyError:
        iterator = iter(self._generator(*self._args.pop(iterator_id)))
        self._iterators[iterator_id] = iterator
        return iterator

    def iterator_completed(self, iterator_id):
      del self._iterators[iterator_id]

  @staticmethod
  def from_generator(generator, output_types, output_shapes=None, args=None):
    """Creates a `Dataset` whose elements are generated by `generator`.

    The `generator` argument must be a callable object that returns an object
    that supports the `iter()` protocol (e.g. a generator function). The
    elements generated by `generator` must be compatible with the given
    `output_types` and (optional) `output_shapes` arguments.

    For example:

    ```python
    import itertools
    tf.compat.v1.enable_eager_execution()

    def gen():
      for i in itertools.count(1):
        yield (i, [1] * i)

    ds = tf.data.Dataset.from_generator(
        gen, (tf.int64, tf.int64),
        (tf.TensorShape([]), tf.TensorShape([None])))

    for value in ds.take(2):
      print(value)
    # (1, array([1]))
    # (2, array([1, 1]))
    ```

    NOTE: The current implementation of `Dataset.from_generator()` uses
    `tf.numpy_function` and inherits the same constraints. In particular, it
    requires the `Dataset`- and `Iterator`-related operations to be placed on
    a device in the same process as the Python program that called
    `Dataset.from_generator()`. The body of `generator` will not be
    serialized in a `GraphDef`, and you should not use this method if you
    need to serialize your model and restore it in a different environment.
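    For example, the `args` argument can be used to parameterize the
    generator with tensors that are evaluated and passed in as NumPy arrays;
    a minimal sketch (the generator below is illustrative only):

    ```python
    def gen(limit):
      for i in range(limit):
        yield i

    ds = tf.data.Dataset.from_generator(
        gen, tf.int64, tf.TensorShape([]), args=(10,))
    ```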
NOTE: If `generator` depends on mutable global variables or other external state, be aware that the runtime may invoke `generator` multiple times (in order to support repeating the `Dataset`) and at any time between the call to `Dataset.from_generator()` and the production of the first element from the generator. Mutating global variables or external state can cause undefined behavior, and we recommend that you explicitly cache any external state in `generator` before calling `Dataset.from_generator()`. Args: generator: A callable object that returns an object that supports the `iter()` protocol. If `args` is not specified, `generator` must take no arguments; otherwise it must take as many arguments as there are values in `args`. output_types: A nested structure of `tf.DType` objects corresponding to each component of an element yielded by `generator`. output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects corresponding to each component of an element yielded by `generator`. args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated and passed to `generator` as NumPy-array arguments. Returns: Dataset: A `Dataset`. """ if not callable(generator): raise TypeError("`generator` must be callable.") if output_shapes is None: output_shapes = nest.map_structure( lambda _: tensor_shape.TensorShape(None), output_types) else: output_shapes = nest.map_structure_up_to( output_types, tensor_shape.as_shape, output_shapes) if args is None: args = () else: args = tuple(ops.convert_n_to_tensor(args, name="args")) flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)] flattened_shapes = nest.flatten(output_shapes) generator_state = DatasetV2._GeneratorState(generator) def get_iterator_id_fn(unused_dummy): """Creates a unique `iterator_id` for each pass over the dataset. The returned `iterator_id` disambiguates between multiple concurrently existing iterators. Args: unused_dummy: Ignored value. Returns: A `tf.int64` tensor whose value uniquely identifies an iterator in `generator_state`. """ return script_ops.numpy_function(generator_state.get_next_id, args, dtypes.int64) def generator_next_fn(iterator_id_t): """Generates the next element from iterator with ID `iterator_id_t`. We map this function across an infinite repetition of the `iterator_id_t`, and raise `StopIteration` to terminate the iteration. Args: iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the iterator in `generator_state` from which to generate an element. Returns: The next element to generate from the iterator. """ def generator_py_func(iterator_id): """A `py_func` that will be called to invoke the iterator.""" # `next()` raises `StopIteration` when there are no more # elements remaining to be generated. values = next(generator_state.get_iterator(iterator_id)) # Use the same _convert function from the py_func() implementation to # convert the returned values to arrays early, so that we can inspect # their values. try: flattened_values = nest.flatten_up_to(output_types, values) except (TypeError, ValueError): raise TypeError( "`generator` yielded an element that did not match the expected " "structure. The expected structure was %s, but the yielded " "element was %s." 
% (output_types, values)) ret_arrays = [] for ret, dtype in zip(flattened_values, flattened_types): try: ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access ret, dtype=dtype.as_numpy_dtype)) except (TypeError, ValueError): raise TypeError( "`generator` yielded an element that could not be converted to " "the expected type. The expected type was %s, but the yielded " "element was %s." % (dtype.name, ret)) # Additional type and shape checking to ensure that the components # of the generated element match the `output_types` and `output_shapes` # arguments. for (ret_array, expected_dtype, expected_shape) in zip( ret_arrays, flattened_types, flattened_shapes): if ret_array.dtype != expected_dtype.as_numpy_dtype: raise TypeError( "`generator` yielded an element of type %s where an element " "of type %s was expected." % (ret_array.dtype, expected_dtype.as_numpy_dtype)) if not expected_shape.is_compatible_with(ret_array.shape): raise ValueError( "`generator` yielded an element of shape %s where an element " "of shape %s was expected." % (ret_array.shape, expected_shape)) return ret_arrays flat_values = script_ops.numpy_function(generator_py_func, [iterator_id_t], flattened_types) # The `py_func()` op drops the inferred shapes, so we add them back in # here. if output_shapes is not None: for ret_t, shape in zip(flat_values, flattened_shapes): ret_t.set_shape(shape) return nest.pack_sequence_as(output_types, flat_values) def finalize_fn(iterator_id_t): """Releases host-side state for the iterator with ID `iterator_id_t`.""" def finalize_py_func(iterator_id): generator_state.iterator_completed(iterator_id) # We return a dummy value so that the `finalize_fn` has a valid # signature. # NOTE(mrry): Explicitly create an array of `np.int64` because implicit # casting in `py_func()` will create an array of `np.int32` on Windows, # leading to a runtime error. return np.array(0, dtype=np.int64) return script_ops.numpy_function(finalize_py_func, [iterator_id_t], dtypes.int64) # This function associates each traversal of `generator` with a unique # iterator ID. def flat_map_fn(dummy_arg): # The `get_iterator_id_fn` gets a unique ID for the current instance of # of the generator. # The `generator_next_fn` gets the next element from the iterator with the # given ID, and raises StopIteration when that iterator contains no # more elements. return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn, finalize_fn) # A single-element dataset that, each time it is evaluated, contains a # freshly-generated and unique (for the returned dataset) int64 # ID that will be used to identify the appropriate Python state, which # is encapsulated in `generator_state`, and captured in # `get_iterator_id_map_fn`. dummy = 0 id_dataset = Dataset.from_tensors(dummy) # A dataset that contains all of the elements generated by a # single iterator created from `generator`, identified by the # iterator ID contained in `id_dataset`. Lifting the iteration # into a flat_map here enables multiple repetitions and/or nested # versions of the returned dataset to be created, because it forces # the generation of a new ID for each version. return id_dataset.flat_map(flat_map_fn) @staticmethod def range(*args): """Creates a `Dataset` of a step-separated range of values. 
    For example:

    ```python
    Dataset.range(5) == [0, 1, 2, 3, 4]
    Dataset.range(2, 5) == [2, 3, 4]
    Dataset.range(1, 5, 2) == [1, 3]
    Dataset.range(1, 5, -2) == []
    Dataset.range(5, 1) == []
    Dataset.range(5, 1, -2) == [5, 3]
    ```

    Args:
      *args: follows the same semantics as Python's `range`.
        len(args) == 1 -> start = 0, stop = args[0], step = 1
        len(args) == 2 -> start = args[0], stop = args[1], step = 1
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2]

    Returns:
      Dataset: A `RangeDataset`.

    Raises:
      ValueError: if len(args) == 0.
    """
    return RangeDataset(*args)

  @staticmethod
  def zip(datasets):
    """Creates a `Dataset` by zipping together the given datasets.

    This method has similar semantics to the built-in `zip()` function in
    Python, with the main difference being that the `datasets` argument can
    be an arbitrary nested structure of `Dataset` objects. For example:

    ```python
    a = Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    b = Dataset.range(4, 7)  # ==> [ 4, 5, 6 ]
    c = Dataset.range(7, 13).batch(2)  # ==> [ [7, 8], [9, 10], [11, 12] ]
    d = Dataset.range(13, 15)  # ==> [ 13, 14 ]

    # The nested structure of the `datasets` argument determines the
    # structure of elements in the resulting dataset.
    Dataset.zip((a, b))  # ==> [ (1, 4), (2, 5), (3, 6) ]
    Dataset.zip((b, a))  # ==> [ (4, 1), (5, 2), (6, 3) ]

    # The `datasets` argument may contain an arbitrary number of
    # datasets.
    Dataset.zip((a, b, c))  # ==> [ (1, 4, [7, 8]),
                            #       (2, 5, [9, 10]),
                            #       (3, 6, [11, 12]) ]

    # The number of elements in the resulting dataset is the same as
    # the size of the smallest dataset in `datasets`.
    Dataset.zip((a, d))  # ==> [ (1, 13), (2, 14) ]
    ```

    Args:
      datasets: A nested structure of datasets.

    Returns:
      Dataset: A `Dataset`.
    """
    return ZipDataset(datasets)

  def concatenate(self, dataset):
    """Creates a `Dataset` by concatenating the given dataset with this dataset.

    ```python
    a = Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    b = Dataset.range(4, 8)  # ==> [ 4, 5, 6, 7 ]

    # The input dataset and dataset to be concatenated should have the same
    # nested structures and output types.
    # c = Dataset.range(8, 14).batch(2)  # ==> [ [8, 9], [10, 11], [12, 13] ]
    # d = Dataset.from_tensor_slices([14.0, 15.0, 16.0])
    # a.concatenate(c) and a.concatenate(d) would result in error.

    a.concatenate(b)  # ==> [ 1, 2, 3, 4, 5, 6, 7 ]
    ```

    Args:
      dataset: `Dataset` to be concatenated.

    Returns:
      Dataset: A `Dataset`.
    """
    return ConcatenateDataset(self, dataset)

  def prefetch(self, buffer_size):
    """Creates a `Dataset` that prefetches elements from this dataset.

    Note: Like other `Dataset` methods, prefetch operates on the elements of
    the input dataset. It has no concept of examples vs. batches.
    `examples.prefetch(2)` will prefetch two elements (2 examples), while
    `examples.batch(20).prefetch(2)` will prefetch 2 elements (2 batches, of
    20 examples each).

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
        number of elements that will be buffered when prefetching.

    Returns:
      Dataset: A `Dataset`.
    """
    return PrefetchDataset(self, buffer_size)

  @staticmethod
  def list_files(file_pattern, shuffle=None, seed=None):
    """A dataset of all files matching one or more glob patterns.

    NOTE: The default behavior of this method is to return filenames in a
    non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
    to get results in a deterministic order.
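    For instance, a minimal sketch of both deterministic modes (the paths
    below are illustrative only):

    ```python
    files = tf.data.Dataset.list_files("/path/to/dir/*.py", shuffle=False)
    # Or keep shuffling, but make the order reproducible across runs:
    files = tf.data.Dataset.list_files("/path/to/dir/*.py", seed=42)
    ```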
Example: If we had the following files on our filesystem: - /path/to/dir/a.txt - /path/to/dir/b.py - /path/to/dir/c.py If we pass "/path/to/dir/*.py" as the directory, the dataset would produce: - /path/to/dir/b.py - /path/to/dir/c.py Args: file_pattern: A string, a list of strings, or a `tf.Tensor` of string type (scalar or vector), representing the filename glob (i.e. shell wildcard) pattern(s) that will be matched. shuffle: (Optional.) If `True`, the file names will be shuffled randomly. Defaults to `True`. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.compat.v1.set_random_seed` for behavior. Returns: Dataset: A `Dataset` of strings corresponding to file names. """ with ops.name_scope("list_files"): if shuffle is None: shuffle = True file_pattern = ops.convert_to_tensor( file_pattern, dtype=dtypes.string, name="file_pattern") matching_files = gen_io_ops.matching_files(file_pattern) # Raise an exception if `file_pattern` does not match any files. condition = math_ops.greater(array_ops.shape(matching_files)[0], 0, name="match_not_empty") message = math_ops.add( "No files matched pattern: ", string_ops.reduce_join(file_pattern, separator=", "), name="message") assert_not_empty = control_flow_ops.Assert( condition, [message], summarize=1, name="assert_not_empty") with ops.control_dependencies([assert_not_empty]): matching_files = array_ops.identity(matching_files) dataset = Dataset.from_tensor_slices(matching_files) if shuffle: # NOTE(mrry): The shuffle buffer size must be greater than zero, but the # list of files might be empty. buffer_size = math_ops.maximum( array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1) dataset = dataset.shuffle(buffer_size, seed=seed) return dataset def repeat(self, count=None): """Repeats this dataset `count` times. NOTE: If this dataset is a function of global state (e.g. a random number generator), then different repetitions may produce different elements. Args: count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the number of times the dataset should be repeated. The default behavior (if `count` is `None` or `-1`) is for the dataset be repeated indefinitely. Returns: Dataset: A `Dataset`. """ return RepeatDataset(self, count) def enumerate(self, start=0): """Enumerates the elements of this dataset. It is similar to python's `enumerate`. For example: ```python # NOTE: The following examples use `{ ... }` to represent the # contents of a dataset. a = { 1, 2, 3 } b = { (7, 8), (9, 10) } # The nested structure of the `datasets` argument determines the # structure of elements in the resulting dataset. a.enumerate(start=5)) == { (5, 1), (6, 2), (7, 3) } b.enumerate() == { (0, (7, 8)), (1, (9, 10)) } ``` Args: start: A `tf.int64` scalar `tf.Tensor`, representing the start value for enumeration. Returns: Dataset: A `Dataset`. """ max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max return Dataset.zip((Dataset.range(start, max_value), self)) def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None): """Randomly shuffles the elements of this dataset. This dataset fills a buffer with `buffer_size` elements, then randomly samples elements from this buffer, replacing the selected elements with new elements. For perfect shuffling, a buffer size greater than or equal to the full size of the dataset is required. 
For instance, if your dataset contains 10,000 elements but `buffer_size` is set to 1,000, then `shuffle` will initially select a random element from only the first 1,000 elements in the buffer. Once an element is selected, its space in the buffer is replaced by the next (i.e. 1,001-st) element, maintaining the 1,000 element buffer. Args: buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements from this dataset from which the new dataset will sample. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.compat.v1.set_random_seed` for behavior. reshuffle_each_iteration: (Optional.) A boolean, which if true indicates that the dataset should be pseudorandomly reshuffled each time it is iterated over. (Defaults to `True`.) Returns: Dataset: A `Dataset`. """ return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration) def cache(self, filename=""): """Caches the elements in this dataset. Args: filename: A `tf.string` scalar `tf.Tensor`, representing the name of a directory on the filesystem to use for caching elements in this Dataset. If a filename is not provided, the dataset will be cached in memory. Returns: Dataset: A `Dataset`. """ return CacheDataset(self, filename) def take(self, count): """Creates a `Dataset` with at most `count` elements from this dataset. Args: count: A `tf.int64` scalar `tf.Tensor`, representing the number of elements of this dataset that should be taken to form the new dataset. If `count` is -1, or if `count` is greater than the size of this dataset, the new dataset will contain all elements of this dataset. Returns: Dataset: A `Dataset`. """ return TakeDataset(self, count) def skip(self, count): """Creates a `Dataset` that skips `count` elements from this dataset. Args: count: A `tf.int64` scalar `tf.Tensor`, representing the number of elements of this dataset that should be skipped to form the new dataset. If `count` is greater than the size of this dataset, the new dataset will contain no elements. If `count` is -1, skips the entire dataset. Returns: Dataset: A `Dataset`. """ return SkipDataset(self, count) def shard(self, num_shards, index): """Creates a `Dataset` that includes only 1/`num_shards` of this dataset. This dataset operator is very useful when running distributed training, as it allows each worker to read a unique subset. When reading a single input file, you can skip elements as follows: ```python d = tf.data.TFRecordDataset(input_file) d = d.shard(num_workers, worker_index) d = d.repeat(num_epochs) d = d.shuffle(shuffle_buffer_size) d = d.map(parser_fn, num_parallel_calls=num_map_threads) ``` Important caveats: - Be sure to shard before you use any randomizing operator (such as shuffle). - Generally it is best if the shard operator is used early in the dataset pipeline. For example, when reading from a set of TFRecord files, shard before converting the dataset to input samples. This avoids reading every file on every worker. The following is an example of an efficient sharding strategy within a complete pipeline: ```python d = Dataset.list_files(pattern) d = d.shard(num_workers, worker_index) d = d.repeat(num_epochs) d = d.shuffle(shuffle_buffer_size) d = d.interleave(tf.data.TFRecordDataset, cycle_length=num_readers, block_length=1) d = d.map(parser_fn, num_parallel_calls=num_map_threads) ``` Args: num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of shards operating in parallel. 
      index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.

    Returns:
      Dataset: A `Dataset`.

    Raises:
      InvalidArgumentError: if `num_shards` or `index` are illegal values.
        Note: error checking is done on a best-effort basis, and errors aren't
        guaranteed to be caught upon dataset creation. (e.g. passing in a
        placeholder tensor bypasses the early checking, and will instead
        result in an error during a session.run call.)
    """
    return ShardDataset(self, num_shards, index)

  def batch(self, batch_size, drop_remainder=False):
    """Combines consecutive elements of this dataset into batches.

    The components of the resulting element will have an additional outer
    dimension, which will be `batch_size` (or `N % batch_size` for the last
    element if `batch_size` does not divide the number of input elements `N`
    evenly and `drop_remainder` is `False`). If your program depends on the
    batches having the same outer dimension, you should set the
    `drop_remainder` argument to `True` to prevent the smaller batch from
    being produced.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.

    Returns:
      Dataset: A `Dataset`.
    """
    return BatchDataset(self, batch_size, drop_remainder)

  def padded_batch(self,
                   batch_size,
                   padded_shapes,
                   padding_values=None,
                   drop_remainder=False):
    """Combines consecutive elements of this dataset into padded batches.

    This transformation combines multiple consecutive elements of the input
    dataset into a single element.

    Like `tf.data.Dataset.batch`, the components of the resulting element
    will have an additional outer dimension, which will be `batch_size` (or
    `N % batch_size` for the last element if `batch_size` does not divide the
    number of input elements `N` evenly and `drop_remainder` is `False`). If
    your program depends on the batches having the same outer dimension, you
    should set the `drop_remainder` argument to `True` to prevent the smaller
    batch from being produced.

    Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
    different shapes, and this transformation will pad each component to the
    respective shape in `padded_shapes`. The `padded_shapes` argument
    determines the resulting shape for each dimension of each component in an
    output element:

    * If the dimension is a constant (e.g. `tf.compat.v1.Dimension(37)`), the
      component will be padded out to that length in that dimension.
    * If the dimension is unknown (e.g. `tf.compat.v1.Dimension(None)`), the
      component will be padded out to the maximum length of all elements in
      that dimension.

    See also `tf.data.experimental.dense_to_sparse_batch`, which combines
    elements that may have different shapes into a `tf.SparseTensor`.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64`
        vector tensor-like objects representing the shape to which the
        respective component of each input element should be padded prior to
        batching. Any unknown dimensions (e.g.
        `tf.compat.v1.Dimension(None)` in a `tf.TensorShape` or `-1` in a
        tensor-like object) will be padded to the maximum size of that
        dimension in each batch.
      padding_values: (Optional.)
        A nested structure of scalar-shaped `tf.Tensor`, representing the
        padding values to use for the respective components. Defaults are `0`
        for numeric types and the empty string for string types.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.

    Returns:
      Dataset: A `Dataset`.
    """
    return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
                              drop_remainder)

  def map(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across the elements of this dataset.

    This transformation applies `map_func` to each element of this dataset,
    and returns a new dataset containing the transformed elements, in the
    same order as they appeared in the input. For example:

    ```python
    a = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]

    a.map(lambda x: x + 1)  # ==> [ 2, 3, 4, 5, 6 ]
    ```

    The input signature of `map_func` is determined by the structure of each
    element in this dataset. For example:

    ```python
    # NOTE: The following examples use `{ ... }` to represent the
    # contents of a dataset.

    # Each element is a `tf.Tensor` object.
    a = { 1, 2, 3, 4, 5 }
    # `map_func` takes a single argument of type `tf.Tensor` with the same
    # shape and dtype.
    result = a.map(lambda x: ...)

    # Each element is a tuple containing two `tf.Tensor` objects.
    b = { (1, "foo"), (2, "bar"), (3, "baz") }
    # `map_func` takes two arguments of type `tf.Tensor`.
    result = b.map(lambda x_int, y_str: ...)

    # Each element is a dictionary mapping strings to `tf.Tensor` objects.
    c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
    # `map_func` takes a single argument of type `dict` with the same keys as
    # the elements.
    result = c.map(lambda d: ...)
    ```

    The value or values returned by `map_func` determine the structure of each
    element in the returned dataset.

    ```python
    # `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
    def f(...):
      return tf.constant(37.0)
    result = dataset.map(f)
    result.output_classes == tf.Tensor
    result.output_types == tf.float32
    result.output_shapes == []  # scalar

    # `map_func` returns two `tf.Tensor` objects.
    def g(...):
      return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
    result = dataset.map(g)
    result.output_classes == (tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string)
    result.output_shapes == ([], [3])

    # Python primitives, lists, and NumPy arrays are implicitly converted to
    # `tf.Tensor`.
    def h(...):
      return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
    result = dataset.map(h)
    result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
    result.output_types == (tf.float32, tf.string, tf.float64)
    result.output_shapes == ([], [3], [2])

    # `map_func` can return nested structures.
    def i(...):
      return {"a": 37.0, "b": [42, 16]}, "foo"
    result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
    result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
    result.output_shapes == ({"a": [], "b": [2]}, [])
    ```

    `map_func` can accept as arguments and return any type of dataset element.

    Note that irrespective of the context in which `map_func` is defined
    (eager vs. graph), tf.data traces the function and executes it as a graph.
    To use Python code inside of the function you have two options:

    1) Rely on AutoGraph to convert Python code into an equivalent graph
    computation. The downside of this approach is that AutoGraph can convert
    some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but will generally result in worse performance than 1). For example: ```python d = tf.data.Dataset.from_tensor_slices(['hello', 'world']) # transform a string tensor to upper case string using a Python function def upper_case_fn(t: tf.Tensor) -> str: return t.numpy().decode('utf-8').upper() d.map(lambda x: tf.py_function(func=upper_case_fn, inp=[x], Tout=tf.string)) # ==> [ "HELLO", "WORLD" ] ``` Args: map_func: A function mapping a dataset element to another dataset element. num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`, representing the number elements to process asynchronously in parallel. If not specified, elements will be processed sequentially. If the value `tf.data.experimental.AUTOTUNE` is used, then the number of parallel calls is set dynamically based on available CPU. Returns: Dataset: A `Dataset`. """ if num_parallel_calls is None: return MapDataset(self, map_func, preserve_cardinality=True) else: return ParallelMapDataset( self, map_func, num_parallel_calls, preserve_cardinality=True) def flat_map(self, map_func): """Maps `map_func` across this dataset and flattens the result. Use `flat_map` if you want to make sure that the order of your dataset stays the same. For example, to flatten a dataset of batches into a dataset of their elements: ```python a = Dataset.from_tensor_slices([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) a.flat_map(lambda x: Dataset.from_tensor_slices(x + 1)) # ==> # [ 2, 3, 4, 5, 6, 7, 8, 9, 10 ] ``` `tf.data.Dataset.interleave()` is a generalization of `flat_map`, since `flat_map` produces the same output as `tf.data.Dataset.interleave(cycle_length=1)` Args: map_func: A function mapping a dataset element to a dataset. Returns: Dataset: A `Dataset`. """ return FlatMapDataset(self, map_func) def interleave(self, map_func, cycle_length=AUTOTUNE, block_length=1, num_parallel_calls=None): """Maps `map_func` across this dataset, and interleaves the results. For example, you can use `Dataset.interleave()` to process many input files concurrently: ```python # Preprocess 4 files concurrently, and interleave blocks of 16 records from # each file. filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...] dataset = (Dataset.from_tensor_slices(filenames) .interleave(lambda x: TextLineDataset(x).map(parse_fn, num_parallel_calls=1), cycle_length=4, block_length=16)) ``` The `cycle_length` and `block_length` arguments control the order in which elements are produced. `cycle_length` controls the number of input elements that are processed concurrently. If you set `cycle_length` to 1, this transformation will handle one input element at a time, and will produce identical results to `tf.data.Dataset.flat_map`. In general, this transformation will apply `map_func` to `cycle_length` input elements, open iterators on the returned `Dataset` objects, and cycle through them producing `block_length` consecutive elements from each iterator, and consuming the next input element each time it reaches the end of an iterator. For example: ```python a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ] # NOTE: New lines indicate "block" boundaries. a.interleave(lambda x: Dataset.from_tensors(x).repeat(6), cycle_length=2, block_length=4) # ==> [1, 1, 1, 1, # 2, 2, 2, 2, # 1, 1, # 2, 2, # 3, 3, 3, 3, # 4, 4, 4, 4, # 3, 3, # 4, 4, # 5, 5, 5, 5, # 5, 5] ``` NOTE: The order of elements yielded by this transformation is deterministic, as long as `map_func` is a pure function. 
If `map_func` contains any stateful operations, the order in which that state is accessed is undefined. Args: map_func: A function mapping a dataset element to a dataset. cycle_length: (Optional.) The number of input elements that will be processed concurrently. If not specified, the value will be derived from the number of available CPU cores. If the `num_parallel_calls` argument is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument also identifies the maximum degree of parallelism. block_length: (Optional.) The number of consecutive elements to produce from each input element before cycling to another input element. num_parallel_calls: (Optional.) If specified, the implementation creates a threadpool, which is used to fetch inputs from cycle elements asynchronously and in parallel. The default behavior is to fetch inputs from cycle elements synchronously with no parallelism. If the value `tf.data.experimental.AUTOTUNE` is used, then the number of parallel calls is set dynamically based on available CPU. Returns: Dataset: A `Dataset`. """ if num_parallel_calls is None: return InterleaveDataset(self, map_func, cycle_length, block_length) else: return ParallelInterleaveDataset(self, map_func, cycle_length, block_length, num_parallel_calls) def filter(self, predicate): """Filters this dataset according to `predicate`. ```python d = tf.data.Dataset.from_tensor_slices([1, 2, 3]) d = d.filter(lambda x: x < 3) # ==> [1, 2] # `tf.math.equal(x, y)` is required for equality comparison def filter_fn(x): return tf.math.equal(x, 1) d = d.filter(filter_fn) # ==> [1] ``` Args: predicate: A function mapping a dataset element to a boolean. Returns: Dataset: The `Dataset` containing the elements of this dataset for which `predicate` is `True`. """ return FilterDataset(self, predicate) def apply(self, transformation_func): """Applies a transformation function to this dataset. `apply` enables chaining of custom `Dataset` transformations, which are represented as functions that take one `Dataset` argument and return a transformed `Dataset`. For example: ``` dataset = (dataset.map(lambda x: x ** 2) .apply(group_by_window(key_func, reduce_func, window_size)) .map(lambda x: x ** 3)) ``` Args: transformation_func: A function that takes one `Dataset` argument and returns a `Dataset`. Returns: Dataset: The `Dataset` returned by applying `transformation_func` to this dataset. """ dataset = transformation_func(self) if not isinstance(dataset, DatasetV2): raise TypeError( "`transformation_func` must return a Dataset. Got {}.".format( dataset)) dataset._input_datasets = [self] # pylint: disable=protected-access return dataset def window(self, size, shift=None, stride=1, drop_remainder=False): """Combines (nests of) input elements into a dataset of (nests of) windows. A "window" is a finite dataset of flat elements of size `size` (or possibly fewer if there are not enough input elements to fill the window and `drop_remainder` evaluates to false). The `stride` argument determines the stride of the input elements, and the `shift` argument determines the shift of the window. 
    For example, letting `{ ... }` represent a Dataset:

    - `tf.data.Dataset.range(7).window(2)` produces
      `{{0, 1}, {2, 3}, {4, 5}, {6}}`
    - `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
      `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
    - `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
      `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`

    Note that when the `window` transformation is applied to a dataset of
    nested elements, it produces a dataset of nested windows. For example:

    - `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
      produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
    - `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
      produces `{{"a": {0, 1}}, {"a": {2, 3}}}`

    Args:
      size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of the input dataset to combine into a window.
      shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        forward shift of the sliding window in each iteration. Defaults to
        `size`.
      stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        stride of the input elements in the sliding window.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether a window should be dropped in case its size is smaller than
        `size`.

    Returns:
      Dataset: A `Dataset` of (nests of) windows -- finite datasets of flat
        elements created from the (nests of) input elements.
    """
    if shift is None:
      shift = size
    return WindowDataset(self, size, shift, stride, drop_remainder)

  def reduce(self, initial_state, reduce_func):
    """Reduces the input dataset to a single element.

    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information
    in its internal state. The `initial_state` argument is used for the
    initial state and the final state is returned as the result.

    For example:

    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)`
      produces `5`
    - `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)`
      produces `10`

    Args:
      initial_state: An element representing the initial state of the
        transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a new element.
        The structure of `new_state` must match the structure of
        `initial_state`.

    Returns:
      A dataset element corresponding to the final state of the
      transformation.
    """
    with ops.name_scope("initial_state"):
      initial_state = structure.normalize_element(initial_state)
    state_structure = structure.type_spec_from_value(initial_state)

    # Iteratively rerun the reduce function until reaching a fixed point on
    # `state_structure`.
    need_to_rerun = True
    while need_to_rerun:

      wrapped_func = StructuredFunctionWrapper(
          reduce_func,
          "reduce()",
          input_structure=(state_structure, self.element_spec),
          add_to_graph=False)

      # Extract and validate class information from the returned values.
      output_classes = wrapped_func.output_classes
      state_classes = nest.map_structure(
          lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
          state_structure)
      for new_state_class, state_class in zip(
          nest.flatten(output_classes), nest.flatten(state_classes)):
        if not issubclass(new_state_class, state_class):
          raise TypeError(
              "The element classes for the new state must match the initial "
              "state. Expected %s; got %s." %
              (state_classes, wrapped_func.output_classes))

      # Extract and validate type information from the returned values.
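      # NOTE: Unlike shapes (which are weakened to a mutually compatible
      # shape below), dtypes must match the initial state exactly on every
      # iteration; there is no widening step for types.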
output_types = wrapped_func.output_types state_types = nest.map_structure( lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access state_structure) for new_state_type, state_type in zip( nest.flatten(output_types), nest.flatten(state_types)): if new_state_type != state_type: raise TypeError( "The element types for the new state must match the initial " "state. Expected %s; got %s." % (state_types, wrapped_func.output_types)) # Extract shape information from the returned values. output_shapes = wrapped_func.output_shapes state_shapes = nest.map_structure( lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access state_structure) flat_state_shapes = nest.flatten(state_shapes) flat_new_state_shapes = nest.flatten(output_shapes) weakened_state_shapes = [ original.most_specific_compatible_shape(new) for original, new in zip(flat_state_shapes, flat_new_state_shapes) ] need_to_rerun = False for original_shape, weakened_shape in zip(flat_state_shapes, weakened_state_shapes): if original_shape.ndims is not None and ( weakened_shape.ndims is None or original_shape.as_list() != weakened_shape.as_list()): need_to_rerun = True break if need_to_rerun: # TODO(b/110122868): Support a "most specific compatible structure" # method for combining structures, to avoid using legacy structures # here. state_structure = structure.convert_legacy_structure( state_types, nest.pack_sequence_as(state_shapes, weakened_state_shapes), state_classes) reduce_func = wrapped_func.function reduce_func.add_to_graph(ops.get_default_graph()) # pylint: disable=protected-access return structure.from_compatible_tensor_list( state_structure, gen_dataset_ops.reduce_dataset( self._variant_tensor, structure.to_tensor_list(state_structure, initial_state), reduce_func.captured_inputs, f=reduce_func, output_shapes=structure.get_flat_tensor_shapes(state_structure), output_types=structure.get_flat_tensor_types(state_structure))) def unbatch(self): """Splits elements of a dataset into multiple elements. For example, if elements of the dataset are shaped `[B, a0, a1, ...]`, where `B` may vary for each input element, then for each element in the dataset, the unbatched dataset will contain `B` consecutive elements of shape `[a0, a1, ...]`. ```python # NOTE: The following example uses `{ ... }` to represent the contents # of a dataset. ds = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] } ds.unbatch() == {'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'} ``` Returns: A `Dataset` transformation function, which can be passed to `tf.data.Dataset.apply`. """ # NOTE(mrry): We must ensure that any non-tensor components in `dataset` # are normalized to their dense tensor representation, so that the # non-tensor oblivious unbatching logic will slice them appropriately. # This leads to a somewhat inefficient re-encoding step for all non-tensor # components. # # TODO(mrry): Consider optimizing this if it turns out to be a bottleneck. def normalize(arg, *rest): # pylint: disable=protected-access if rest: return structure.to_batched_tensor_list(self.element_spec, (arg,) + rest) else: return structure.to_batched_tensor_list(self.element_spec, arg) normalized_dataset = self.map(normalize) # NOTE(mrry): Our `map()` has lost information about the structure of # non-tensor components, so re-apply the structure of the original dataset. 
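    # `_RestructuredDataset` re-attaches `self.element_spec` to the mapped
    # dataset, since `normalize` returned flat batched tensor lists.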
restructured_dataset = _RestructuredDataset(normalized_dataset, self.element_spec) return _UnbatchDataset(restructured_dataset) def with_options(self, options): """Returns a new `tf.data.Dataset` with the given options set. The options are "global" in the sense they apply to the entire dataset. If options are set multiple times, they are merged as long as different options do not use different non-default values. Args: options: A `tf.data.Options` that identifies the options the use. Returns: Dataset: A `Dataset` with the given options. Raises: ValueError: when an option is set more than once to a non-default value """ return _OptionsDataset(self, options) @tf_export(v1=["data.Dataset"]) class DatasetV1(DatasetV2): """Represents a potentially large set of elements. A `Dataset` can be used to represent an input pipeline as a collection of elements and a "logical plan" of transformations that act on those elements. """ def __init__(self): try: variant_tensor = self._as_variant_tensor() except AttributeError as e: if "_as_variant_tensor" in str(e): raise AttributeError("Please use _variant_tensor instead of " "_as_variant_tensor() to obtain the variant " "associated with a dataset") raise AttributeError("{}: A likely cause of this error is that the super " "call for this dataset is not the last line of the " "__init__ method. The base class causes the " "_as_variant_tensor call in its constructor and " "if that uses attributes defined in the __init__ " "method, those attrs need to be defined before the " "super call.".format(e)) super(DatasetV1, self).__init__(variant_tensor) @abc.abstractmethod def _as_variant_tensor(self): """Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset. Returns: A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset. """ raise NotImplementedError("Dataset._as_variant_tensor") @deprecation.deprecated( None, "Use `for ... in dataset:` to iterate over a dataset. If using " "`tf.estimator`, return the `Dataset` object directly from your input " "function. As a last resort, you can use " "`tf.compat.v1.data.make_one_shot_iterator(dataset)`.") def make_one_shot_iterator(self): """Creates an `Iterator` for enumerating the elements of this dataset. Note: The returned iterator will be initialized automatically. A "one-shot" iterator does not currently support re-initialization. Returns: An `Iterator` over the elements of this dataset. """ return self._make_one_shot_iterator() def _make_one_shot_iterator(self): # pylint: disable=missing-docstring if context.executing_eagerly(): with ops.device(self._variant_tensor.device): return iterator_ops.IteratorV2(self) _ensure_same_dataset_graph(self) # Now that we create datasets at python object creation time, the capture # by value _make_dataset() function would try to capture these variant # tensor dataset inputs, which are marked as stateful ops and would throw # an error if we try and capture them. We therefore traverse the graph # to find all these ops and whitelist them so that the capturing # logic instead of throwing an error recreates these ops which is what was # happening before. all_ds_ops = traverse.obtain_all_variant_tensor_ops(self) graph_level_seed, op_level_seed = core_random_seed.get_seed(None) # NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is # a 0-argument function. 
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops) def _make_dataset(): """Factory function for a dataset.""" # NOTE(mrry): `Defun` does not capture the graph-level seed from the # enclosing graph, so if a graph-level seed is present we set the local # graph seed based on a combination of the graph- and op-level seeds. if graph_level_seed is not None: assert op_level_seed is not None core_random_seed.set_random_seed( (graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1)) # TODO: DEKHTIARJonathan - Re-enable once stable. # prefetch_to_device = self.options().experimental_optimization.prefetch_to_device # if prefetch_to_device is not None: # logging.warning("GPU prefetching has been deactivated in your " # "`tf.data` pipeline. It is not compatible with " # "`tf.data.make_one_shot_iterator`. Please transition " # "to `tf.data.make_initializable_iterator` for maximum " # "performance.") # try: # options = Options() # options.experimental_optimization.prefetch_to_device = None # dataset = self.with_options(options) # except ValueError: # self._dataset._options.experimental_optimization.prefetch_to_device = None # dataset = self # else: # dataset = self dataset = self dataset = dataset._apply_options() return dataset._variant_tensor # pylint: disable=protected-access try: _make_dataset.add_to_graph(ops.get_default_graph()) except ValueError as err: if "Cannot capture a stateful node" in str(err): raise ValueError( "Failed to create a one-shot iterator for a dataset. " "`Dataset.make_one_shot_iterator()` does not support datasets that " "capture stateful objects, such as a `Variable` or `LookupTable`. " "In these cases, use `Dataset.make_initializable_iterator()`. " "(Original error: %s)" % err) else: six.reraise(ValueError, err) with ops.device(self._variant_tensor.device): # pylint: disable=protected-access return iterator_ops.Iterator( gen_dataset_ops.one_shot_iterator( dataset_factory=_make_dataset, **self._flat_structure), None, get_legacy_output_types(self), get_legacy_output_shapes(self), get_legacy_output_classes(self)) @deprecation.deprecated( None, "Use `for ... in dataset:` to iterate over a dataset. If using " "`tf.estimator`, return the `Dataset` object directly from your input " "function. As a last resort, you can use " "`tf.compat.v1.data.make_initializable_iterator(dataset)`.") def make_initializable_iterator(self, shared_name=None, force_deactivate_gpu_prefetching=False): """Creates an `Iterator` for enumerating the elements of this dataset. Note: The returned iterator will be in an uninitialized state, and you must run the `iterator.initializer` operation before using it: ```python dataset = ... iterator = dataset.make_initializable_iterator() # ... sess.run(iterator.initializer) ``` Args: shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). force_deactivate_gpu_prefetching (Optional) If True, will deactivate automatic GPU prefetching. Returns: An `Iterator` over the elements of this dataset. Raises: RuntimeError: If eager execution is enabled. 
""" return self._make_initializable_iterator(shared_name, force_deactivate_gpu_prefetching) def _make_initializable_iterator(self, shared_name=None, force_deactivate_gpu_prefetching=False): # pylint: disable=missing-docstring if context.executing_eagerly(): raise RuntimeError( "dataset.make_initializable_iterator is not supported when eager " "execution is enabled.") _ensure_same_dataset_graph(self) dataset = self # if force_deactivate_gpu_prefetching: # prefetch_to_device = dataset.options().experimental_optimization.prefetch_to_device # if prefetch_to_device is not None: # logging.warning("GPU prefetching has been deactivated in your " # "`tf.data` pipeline. It is not compatible with " # "`MultiDeviceIterator`.") # try: # options = Options() # options.experimental_optimization.prefetch_to_device = None # dataset = dataset.with_options(options) # except ValueError: # dataset._dataset._options.experimental_optimization.prefetch_to_device = None dataset = dataset._apply_options() if shared_name is None: shared_name = "" with ops.device(self._variant_tensor.device): iterator_resource = gen_dataset_ops.iterator_v2( container="", shared_name=shared_name, **self._flat_structure) initializer = gen_dataset_ops.make_iterator( dataset._variant_tensor, # pylint: disable=protected-access iterator_resource) # pylint: disable=protected-access return iterator_ops.Iterator(iterator_resource, initializer, get_legacy_output_types(dataset), get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset)) @property @deprecation.deprecated( None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.") def output_classes(self): """Returns the class of each component of an element of this dataset. Returns: A nested structure of Python `type` objects corresponding to each component of an element of this dataset. """ return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access self.element_spec) @property @deprecation.deprecated( None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.") def output_shapes(self): """Returns the shape of each component of an element of this dataset. Returns: A nested structure of `tf.TensorShape` objects corresponding to each component of an element of this dataset. """ return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access self.element_spec) @property @deprecation.deprecated( None, "Use `tf.compat.v1.data.get_output_types(dataset)`.") def output_types(self): """Returns the type of each component of an element of this dataset. Returns: A nested structure of `tf.DType` objects corresponding to each component of an element of this dataset. """ return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access self.element_spec) @property def element_spec(self): # TODO(b/110122868): Remove this override once all `Dataset` instances # implement `element_structure`. 
    return structure.convert_legacy_structure(
        self.output_types, self.output_shapes, self.output_classes)

  @staticmethod
  @functools.wraps(DatasetV2.from_tensors)
  def from_tensors(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensors(tensors))

  @staticmethod
  @functools.wraps(DatasetV2.from_tensor_slices)
  def from_tensor_slices(tensors):
    return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))

  @staticmethod
  @deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
  def from_sparse_tensor_slices(sparse_tensor):
    """Splits each rank-N `tf.SparseTensor` in this dataset row-wise.

    Args:
      sparse_tensor: A `tf.SparseTensor`.

    Returns:
      Dataset: A `Dataset` of rank-(N-1) sparse tensors.
    """
    return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))

  @staticmethod
  @functools.wraps(DatasetV2.from_generator)
  def from_generator(generator, output_types, output_shapes=None, args=None):
    return DatasetV1Adapter(DatasetV2.from_generator(
        generator, output_types, output_shapes, args))

  @staticmethod
  @functools.wraps(DatasetV2.range)
  def range(*args):
    return DatasetV1Adapter(DatasetV2.range(*args))

  @staticmethod
  @functools.wraps(DatasetV2.zip)
  def zip(datasets):
    return DatasetV1Adapter(DatasetV2.zip(datasets))

  @functools.wraps(DatasetV2.concatenate)
  def concatenate(self, dataset):
    return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))

  @functools.wraps(DatasetV2.prefetch)
  def prefetch(self, buffer_size):
    return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))

  @staticmethod
  @functools.wraps(DatasetV2.list_files)
  def list_files(file_pattern, shuffle=None, seed=None):
    return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))

  @functools.wraps(DatasetV2.repeat)
  def repeat(self, count=None):
    return DatasetV1Adapter(super(DatasetV1, self).repeat(count))

  @functools.wraps(DatasetV2.shuffle)
  def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
    return DatasetV1Adapter(super(DatasetV1, self).shuffle(
        buffer_size, seed, reshuffle_each_iteration))

  @functools.wraps(DatasetV2.cache)
  def cache(self, filename=""):
    return DatasetV1Adapter(super(DatasetV1, self).cache(filename))

  @functools.wraps(DatasetV2.take)
  def take(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).take(count))

  @functools.wraps(DatasetV2.skip)
  def skip(self, count):
    return DatasetV1Adapter(super(DatasetV1, self).skip(count))

  @functools.wraps(DatasetV2.shard)
  def shard(self, num_shards, index):
    return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))

  @functools.wraps(DatasetV2.batch)
  def batch(self, batch_size, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).batch(
        batch_size, drop_remainder))

  @functools.wraps(DatasetV2.padded_batch)
  def padded_batch(self, batch_size, padded_shapes, padding_values=None,
                   drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
        batch_size, padded_shapes, padding_values, drop_remainder))

  @functools.wraps(DatasetV2.map)
  def map(self, map_func, num_parallel_calls=None):
    if num_parallel_calls is None:
      return DatasetV1Adapter(
          MapDataset(self, map_func, preserve_cardinality=False))
    else:
      return DatasetV1Adapter(
          ParallelMapDataset(
              self, map_func, num_parallel_calls, preserve_cardinality=False))

  @deprecation.deprecated(None, "Use `tf.data.Dataset.map()`.")
  def map_with_legacy_function(self, map_func, num_parallel_calls=None):
    """Maps `map_func` across the elements of this dataset.

    NOTE: This is an escape hatch for existing uses of `map` that do not work
    with V2 functions.
    New uses are strongly discouraged and existing uses should migrate to
    `map` as this method will be removed in V2.

    Args:
      map_func: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to another nested structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number of elements to process asynchronously in
        parallel. If not specified, elements will be processed sequentially.
        If the value `tf.data.experimental.AUTOTUNE` is used, then the number
        of parallel calls is set dynamically based on available CPU.

    Returns:
      Dataset: A `Dataset`.
    """
    if num_parallel_calls is None:
      return DatasetV1Adapter(
          MapDataset(
              self,
              map_func,
              preserve_cardinality=False,
              use_legacy_function=True))
    else:
      return DatasetV1Adapter(
          ParallelMapDataset(
              self,
              map_func,
              num_parallel_calls,
              preserve_cardinality=False,
              use_legacy_function=True))

  @functools.wraps(DatasetV2.flat_map)
  def flat_map(self, map_func):
    return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))

  @functools.wraps(DatasetV2.interleave)
  def interleave(self,
                 map_func,
                 cycle_length=AUTOTUNE,
                 block_length=1,
                 num_parallel_calls=None):
    return DatasetV1Adapter(super(DatasetV1, self).interleave(
        map_func, cycle_length, block_length, num_parallel_calls))

  @functools.wraps(DatasetV2.filter)
  def filter(self, predicate):
    return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))

  @deprecation.deprecated(None, "Use `tf.data.Dataset.filter()`.")
  def filter_with_legacy_function(self, predicate):
    """Filters this dataset according to `predicate`.

    NOTE: This is an escape hatch for existing uses of `filter` that do not
    work with V2 functions. New uses are strongly discouraged and existing
    uses should migrate to `filter` as this method will be removed in V2.

    Args:
      predicate: A function mapping a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to a scalar `tf.bool` tensor.

    Returns:
      Dataset: The `Dataset` containing the elements of this dataset for which
        `predicate` is `True`.
    """
    return FilterDataset(self, predicate, use_legacy_function=True)

  @functools.wraps(DatasetV2.apply)
  def apply(self, transformation_func):
    return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))

  @functools.wraps(DatasetV2.window)
  def window(self, size, shift=None, stride=1, drop_remainder=False):
    return DatasetV1Adapter(super(DatasetV1, self).window(
        size, shift, stride, drop_remainder))

  @functools.wraps(DatasetV2.with_options)
  def with_options(self, options):
    return DatasetV1Adapter(super(DatasetV1, self).with_options(options))


# TODO(b/119044825): Until all `tf.data` unit tests are converted to V2, keep
# this alias in place.
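# Illustrative usage of the V1 API defined above (a sketch assuming TF 1.x
# graph mode with a `tf.compat.v1.Session`; not executed as part of this
# module):
#
#   dataset = Dataset.from_tensor_slices([1, 2, 3]).map(lambda x: x * 2)
#   iterator = dataset.make_one_shot_iterator()
#   next_element = iterator.get_next()
#   # Successive sess.run(next_element) calls yield 2, 4, 6.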
Dataset = DatasetV1


class DatasetV1Adapter(DatasetV1):
  """Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""

  def __init__(self, dataset):
    self._dataset = dataset
    super(DatasetV1Adapter, self).__init__()

  def _as_variant_tensor(self):
    return self._dataset._variant_tensor  # pylint: disable=protected-access

  def _has_captured_ref(self):
    return self._dataset._has_captured_ref()  # pylint: disable=protected-access

  def _inputs(self):
    return self._dataset._inputs()  # pylint: disable=protected-access

  def _functions(self):
    return self._dataset._functions()  # pylint: disable=protected-access

  def options(self):
    return self._dataset.options()

  @property
  def element_spec(self):
    return self._dataset.element_spec  # pylint: disable=protected-access

  def __iter__(self):
    return iter(self._dataset)


def _ensure_same_dataset_graph(dataset):
  """Walks the dataset graph to ensure all datasets come from the same graph."""
  current_graph = ops.get_default_graph()
  bfs_q = Queue.Queue()
  bfs_q.put(dataset)  # pylint: disable=protected-access
  visited = []
  while not bfs_q.empty():
    ds = bfs_q.get()
    visited.append(ds)
    ds_graph = ds._graph  # pylint: disable=protected-access
    if current_graph != ds_graph:
      logging.warning("The graph (" + str(current_graph) + ") of the iterator "
                      "is different from the graph (" + str(ds_graph) + ") in "
                      "which the dataset: " + str(ds._variant_tensor) + " was "  # pylint: disable=protected-access
                      "created. If you are using the Estimator API, "
                      "make sure that no part of the dataset returned by the "
                      "`input_fn` function is defined outside the `input_fn` "
                      "function. Please ensure that all datasets in the "
                      "pipeline are created in the same graph as the iterator. "
                      "NOTE: This warning will become an error in future "
                      "versions of TensorFlow.")
    for input_ds in ds._inputs():  # pylint: disable=protected-access
      if input_ds not in visited:
        bfs_q.put(input_ds)


@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.

  Note: The returned iterator will be initialized automatically.
  A "one-shot" iterator does not support re-initialization.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of this dataset.
  """
  try:
    # Call the defined `_make_one_shot_iterator()` if there is one, because
    # some datasets (e.g. for prefetching) override its behavior.
    return dataset._make_one_shot_iterator()  # pylint: disable=protected-access
  except AttributeError:
    return DatasetV1Adapter(dataset)._make_one_shot_iterator()  # pylint: disable=protected-access


@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None,
                                force_deactivate_gpu_prefetching=False):
  """Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.

  Note: The returned iterator will be in an uninitialized state,
  and you must run the `iterator.initializer` operation before using it:

  ```python
  dataset = ...
  iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
  # ...
  sess.run(iterator.initializer)
  ```

  Args:
    dataset: A `tf.data.Dataset`.
    shared_name: (Optional.) If non-empty, the returned iterator will be shared
      under the given name across multiple sessions that share the same devices
      (e.g. when using a remote server).
    force_deactivate_gpu_prefetching: (Optional.) If `True`, deactivates
      automatic GPU prefetching.

  Returns:
    A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
  Raises:
    RuntimeError: If eager execution is enabled.
  """
  try:
    # Call the defined `_make_initializable_iterator()` if there is one,
    # because some datasets (e.g. for prefetching) override its behavior.
    return dataset._make_initializable_iterator(
        shared_name, force_deactivate_gpu_prefetching)  # pylint: disable=protected-access
  except AttributeError:
    return DatasetV1Adapter(dataset)._make_initializable_iterator(
        shared_name, force_deactivate_gpu_prefetching)  # pylint: disable=protected-access


@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
  """Returns the type specification of an element of a `Dataset` or `Iterator`.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of `tf.TypeSpec` objects matching the structure of an
    element of `dataset_or_iterator` and specifying the type of individual
    components.

  Raises:
    TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator`
      object.
  """
  try:
    return dataset_or_iterator.element_spec  # pylint: disable=protected-access
  except AttributeError:
    raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
                    "object, but got %s." % type(dataset_or_iterator))


@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
  """Returns the output classes of the elements of a `Dataset` or `Iterator`.

  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_classes` property.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of Python `type` objects matching the structure of the
    dataset / iterator elements and specifying the class of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_classes(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))


@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
  """Returns the output shapes of the elements of a `Dataset` or `Iterator`.

  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_shapes` property.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of `tf.TensorShape` objects matching the structure of
    the dataset / iterator elements and specifying the shape of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_shapes(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))


@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
  """Returns the output types of the elements of a `Dataset` or `Iterator`.

  This utility method replaces the deprecated-in-V2
  `tf.compat.v1.Dataset.output_types` property.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A nested structure of `tf.DType` objects matching the structure of
    dataset / iterator elements and specifying the type of the individual
    components.
  """
  return nest.map_structure(
      lambda component_spec: component_spec._to_legacy_output_types(),  # pylint: disable=protected-access
      get_structure(dataset_or_iterator))


@tf_export("data.Options")
class Options(options_lib.OptionsBase):
  """Represents options for `tf.data.Dataset`.
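
  A minimal usage sketch (names are illustrative; `dataset` stands for any
  `tf.data.Dataset`):

  ```python
  options = tf.data.Options()
  options.experimental_deterministic = False
  dataset = dataset.with_options(options)
  ```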
An `Options` object can be, for instance, used to control which static optimizations to apply or whether to use performance modeling to dynamically tune the parallelism of operations such as `tf.data.Dataset.map` or `tf.data.Dataset.interleave`. """ experimental_deterministic = options_lib.create_option( name="experimental_deterministic", ty=bool, docstring= "Whether the outputs need to be produced in deterministic order. If None," " defaults to True.") experimental_distribute = options_lib.create_option( name="experimental_distribute", ty=distribute_options.DistributeOptions, docstring= "The distribution strategy options associated with the dataset. See " "`tf.data.experimental.DistributeOptions` for more details.", default_factory=distribute_options.DistributeOptions) experimental_optimization = options_lib.create_option( name="experimental_optimization", ty=optimization_options.OptimizationOptions, docstring= "The optimization options associated with the dataset. See " "`tf.data.experimental.OptimizationOptions` for more details.", default_factory=optimization_options.OptimizationOptions) experimental_slack = options_lib.create_option( name="experimental_slack", ty=bool, docstring="Whether to introduce 'slack' in the last `prefetch` of the " "input pipeline, if it exists. This may reduce CPU contention with " "accelerator host-side activity at the start of a step. The slack " "frequency is determined by the number of devices attached to this " "input pipeline. If None, defaults to False.") experimental_stats = options_lib.create_option( name="experimental_stats", ty=stats_options.StatsOptions, docstring= "The statistics options associated with the dataset. See " "`tf.data.experimental.StatsOptions` for more details.", default_factory=stats_options.StatsOptions) experimental_threading = options_lib.create_option( name="experimental_threading", ty=threading_options.ThreadingOptions, docstring= "The threading options associated with the dataset. See " "`tf.data.experimental.ThreadingOptions` for more details.", default_factory=threading_options.ThreadingOptions) experimental_stateful_whitelist = options_lib.create_option( name="experimental_stateful_whitelist", ty=list, docstring="By default, tf.data will refuse to serialize a dataset or " "checkpoint its iterator if the dataset contains a stateful op as the " "serialization / checkpointing won't be able to capture its state. 
" "Users can -- at their own risk -- override this restriction by " "explicitly whitelisting stateful ops by specifying them in this list.") def _static_optimizations(self): """Produces the list of enabled static optimizations.""" result = [] result.extend(self.experimental_optimization._static_optimizations()) # pylint: disable=protected-access if self.experimental_deterministic is False: result.append("make_sloppy") if self.experimental_stats and self.experimental_stats.latency_all_edges: result.append("latency_all_edges") if (self.experimental_slack and self.experimental_optimization.prefetch_to_device is None): result.append("slack") if (self.experimental_distribute and self.experimental_distribute._make_stateless): # pylint: disable=protected-access result.append("make_stateless") return result def _static_optimization_configs(self): """Produces the list of configurations for enabled static optimizations.""" result = [] if self.experimental_optimization: result.extend( self.experimental_optimization._static_optimization_configs()) # pylint: disable=protected-access if self.experimental_slack: num_devices = self.experimental_distribute.num_devices if num_devices is None: num_devices = 1 result.append("slack:slack_period:%d" % num_devices) return result def merge(self, options): """Merges itself with the given `tf.data.Options`. The given `tf.data.Options` can be merged as long as there does not exist an attribute that is set to different values in `self` and `options`. Args: options: a `tf.data.Options` to merge with Raises: ValueError: if the given `tf.data.Options` cannot be merged Returns: New `tf.data.Options()` object which is the result of merging self with the input `tf.data.Options`. """ return options_lib.merge_options(self, options) class DatasetSource(DatasetV2): """Abstract class representing a dataset with no inputs.""" def _inputs(self): return [] class UnaryDataset(DatasetV2): """Abstract class representing a dataset with one input.""" def __init__(self, input_dataset, variant_tensor): self._input_dataset = input_dataset super(UnaryDataset, self).__init__(variant_tensor) def _inputs(self): return [self._input_dataset] class UnaryUnchangedStructureDataset(UnaryDataset): """Represents a unary dataset with the same input and output structure.""" def __init__(self, input_dataset, variant_tensor): self._input_dataset = input_dataset super(UnaryUnchangedStructureDataset, self).__init__( input_dataset, variant_tensor) @property def element_spec(self): return self._input_dataset.element_spec class TensorDataset(DatasetSource): """A `Dataset` with a single element.""" def __init__(self, element): """See `Dataset.from_tensors()` for details.""" element = structure.normalize_element(element) self._structure = structure.type_spec_from_value(element) self._tensors = structure.to_tensor_list(self._structure, element) variant_tensor = gen_dataset_ops.tensor_dataset( self._tensors, output_shapes=structure.get_flat_tensor_shapes(self._structure)) super(TensorDataset, self).__init__(variant_tensor) @property def element_spec(self): return self._structure class TensorSliceDataset(DatasetSource): """A `Dataset` of slices from a dataset element.""" def __init__(self, element): """See `Dataset.from_tensor_slices()` for details.""" element = structure.normalize_element(element) batched_spec = structure.type_spec_from_value(element) self._tensors = structure.to_batched_tensor_list(batched_spec, element) self._structure = nest.map_structure( lambda component_spec: component_spec._unbatch(), 
batched_spec) # pylint: disable=protected-access batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value( self._tensors[0].get_shape()[0])) for t in self._tensors[1:]: batch_dim.assert_is_compatible_with(tensor_shape.Dimension( tensor_shape.dimension_value(t.get_shape()[0]))) variant_tensor = gen_dataset_ops.tensor_slice_dataset( self._tensors, output_shapes=structure.get_flat_tensor_shapes(self._structure)) super(TensorSliceDataset, self).__init__(variant_tensor) @property def element_spec(self): return self._structure class SparseTensorSliceDataset(DatasetSource): """A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows.""" def __init__(self, sparse_tensor): """See `Dataset.from_sparse_tensor_slices()` for details.""" if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor): raise TypeError( "`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format( sparse_tensor)) self._sparse_tensor = sparse_tensor indices_shape = self._sparse_tensor.indices.get_shape() shape_shape = self._sparse_tensor.dense_shape.get_shape() rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1) self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64), tensor_spec.TensorSpec([None], self._sparse_tensor.dtype), tensor_spec.TensorSpec([rank], dtypes.int64)) variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset( self._sparse_tensor.indices, self._sparse_tensor.values, self._sparse_tensor.dense_shape) super(SparseTensorSliceDataset, self).__init__(variant_tensor) @property def element_spec(self): return self._structure class _VariantDataset(DatasetV2): """A Dataset wrapper around a `tf.variant`-typed function argument.""" def __init__(self, dataset_variant, structure): self._structure = structure super(_VariantDataset, self).__init__(dataset_variant) def _inputs(self): return [] @property def element_spec(self): return self._structure class _NestedVariant(composite_tensor.CompositeTensor): def __init__(self, variant_tensor, element_spec, dataset_shape): self._variant_tensor = variant_tensor self._element_spec = element_spec self._dataset_shape = dataset_shape @property def _type_spec(self): return DatasetSpec(self._element_spec, self._dataset_shape) @tf_export("data.experimental.from_variant") def from_variant(variant, structure): """Constructs a dataset from the given variant and structure. Args: variant: A scalar `tf.variant` tensor representing a dataset. structure: A `tf.data.experimental.Structure` object representing the structure of each element in the dataset. Returns: A `tf.data.Dataset` instance. """ return _VariantDataset(variant, structure) # pylint: disable=protected-access @tf_export("data.experimental.to_variant") def to_variant(dataset): """Returns a variant representing the given dataset. Args: dataset: A `tf.data.Dataset`. Returns: A scalar `tf.variant` tensor representing the given dataset. 
""" return dataset._variant_tensor # pylint: disable=protected-access @tf_export( "data.DatasetSpec", v1=["data.DatasetSpec", "data.experimental.DatasetStructure"]) class DatasetSpec(type_spec.BatchableTypeSpec): """Type specification for `tf.data.Dataset`.""" __slots__ = ["_element_spec", "_dataset_shape"] def __init__(self, element_spec, dataset_shape=()): self._element_spec = element_spec self._dataset_shape = tensor_shape.as_shape(dataset_shape) @property def value_type(self): return _VariantDataset def _serialize(self): return (self._element_spec, self._dataset_shape) @property def _component_specs(self): return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant) def _to_components(self, value): return value._variant_tensor # pylint: disable=protected-access def _from_components(self, components): # pylint: disable=protected-access if self._dataset_shape.ndims == 0: return _VariantDataset(components, self._element_spec) else: return _NestedVariant(components, self._element_spec, self._dataset_shape) def _to_tensor_list(self, value): return [ ops.convert_to_tensor( tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access ] @staticmethod def from_value(value): return DatasetSpec(value.element_spec) # pylint: disable=protected-access def _batch(self, batch_size): return DatasetSpec( self._element_spec, tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape)) def _unbatch(self): if self._dataset_shape.ndims == 0: raise ValueError("Unbatching a dataset is only supported for rank >= 1") return DatasetSpec(self._element_spec, self._dataset_shape[1:]) def _to_batched_tensor_list(self, value): if self._dataset_shape.ndims == 0: raise ValueError("Unbatching a dataset is only supported for rank >= 1") return self._to_tensor_list(value) def _to_legacy_output_types(self): return self def _to_legacy_output_shapes(self): return self def _to_legacy_output_classes(self): return self class StructuredFunctionWrapper(object): """A function wrapper that supports structured arguments and return values.""" # pylint: disable=protected-access def __init__(self, func, transformation_name, dataset=None, input_classes=None, input_shapes=None, input_types=None, input_structure=None, add_to_graph=True, use_legacy_function=False, defun_kwargs=None): """Creates a new `StructuredFunctionWrapper` for the given function. Args: func: A function from a nested structure to another nested structure. transformation_name: Human-readable name of the transformation in which this function is being instantiated, for error messages. dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this dataset will be assumed as the structure for `func` arguments; otherwise `input_classes`, `input_shapes`, and `input_types` must be defined. input_classes: (Optional.) A nested structure of `type`. If given, this argument defines the Python types for `func` arguments. input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If given, this argument defines the shapes and structure for `func` arguments. input_types: (Optional.) A nested structure of `tf.DType`. If given, this argument defines the element types and structure for `func` arguments. input_structure: (Optional.) A `Structure` object. If given, this argument defines the element types and structure for `func` arguments. add_to_graph: (Optional.) If `True`, the function will be added to the default graph. use_legacy_function: (Optional.) 
        A boolean that determines whether the function will be created using
        `tensorflow.python.eager.function.defun` (default behavior) or
        `tensorflow.python.framework.function.Defun` (legacy behavior).
      defun_kwargs: (Optional.) A dictionary mapping string argument names to
        values. If supplied, will be passed to `function` as keyword arguments.

    Raises:
      ValueError: If an invalid combination of `dataset`, `input_classes`,
        `input_shapes`, and `input_types` is passed.
    """
    if input_structure is None:
      if dataset is None:
        if input_classes is None or input_shapes is None or input_types is None:
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and "
                           "`input_types` must be specified.")
        self._input_structure = structure.convert_legacy_structure(
            input_types, input_shapes, input_classes)
      else:
        if not (input_classes is None and input_shapes is None and
                input_types is None):
          raise ValueError("Either `dataset`, `input_structure` or all of "
                           "`input_classes`, `input_shapes`, and "
                           "`input_types` must be specified.")
        self._input_structure = dataset.element_spec
    else:
      if not (dataset is None and input_classes is None and
              input_shapes is None and input_types is None):
        raise ValueError("Either `dataset`, `input_structure`, or all of "
                         "`input_classes`, `input_shapes`, and `input_types` "
                         "must be specified.")
      self._input_structure = input_structure

    self._func = func

    if defun_kwargs is None:
      defun_kwargs = {}

    readable_transformation_name = transformation_name.replace(
        ".", "_")[:-2] if len(transformation_name) > 2 else ""

    func_name = "_".join(
        [readable_transformation_name,
         function_utils.get_func_name(func)])

    ag_ctx = autograph_ctx.control_status_ctx()

    def _warn_if_collections(transformation_name):
      """Prints a warning if the given graph uses common graph collections.

      NOTE(mrry): Currently a warning is only generated for resources. Any
      variables created will be automatically hoisted out to the outermost
      scope using `init_scope()`. Some collections (such as for control-flow
      contexts) are benign and should not generate a warning.

      Args:
        transformation_name: A human-readable name for the transformation.
      """
      warnings.warn("Creating resources inside a function passed to %s "
                    "is not supported. Create each resource outside the "
                    "function, and capture it inside the function to use it." %
                    transformation_name, stacklevel=5)

    def _wrapper_helper(*args):
      """Wrapper for passing nested structures to and from tf.data functions."""
      nested_args = structure.from_compatible_tensor_list(
          self._input_structure, args)
      if not _should_unpack_args(nested_args):
        nested_args = (nested_args,)

      ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
      # If `func` returns a list of tensors, `nest.flatten()` and
      # `ops.convert_to_tensor()` would conspire to attempt to stack those
      # tensors into a single tensor, because the customized version of
      # `nest.flatten()` does not recurse into lists. Since it is more likely
      # that the list arose from returning the result of an operation (such
      # as `tf.numpy_function()`) that returns a list of not-necessarily-
      # stackable tensors, we treat the returned value as a `tuple` instead.
      # A user wishing to pack the return value into a single tensor can use
      # an explicit `tf.stack()` before returning.
      if isinstance(ret, list):
        ret = tuple(ret)

      try:
        self._output_structure = structure.type_spec_from_value(ret)
      except (ValueError, TypeError):
        raise TypeError("Unsupported return value from function passed to "
                        "%s: %s."
% (transformation_name, ret)) return ret if use_legacy_function: func_name = func_name + "_" + str(ops.uid()) @function.Defun( *structure.get_flat_tensor_types(self._input_structure), func_name=func_name, **defun_kwargs) def wrapper_fn(*args): ret = _wrapper_helper(*args) # _warn_if_collections(transformation_name, ops.get_default_graph(), 0) return structure.to_tensor_list(self._output_structure, ret) self._function = wrapper_fn resource_tracker = tracking.ResourceTracker() with tracking.resource_tracker_scope(resource_tracker): if add_to_graph: self._function.add_to_graph(ops.get_default_graph()) else: # Use the private method that will execute `wrapper_fn` but delay # adding it to the graph in case (e.g.) we need to rerun the function. self._function._create_definition_if_needed() if resource_tracker.resources: _warn_if_collections(transformation_name) else: defun_kwargs.update({"func_name": func_name}) # Note: _wrapper_helper will apply autograph based on context. @eager_function.defun_with_attributes( input_signature=structure.get_flat_tensor_specs( self._input_structure), autograph=False, attributes=defun_kwargs) def wrapper_fn(*args): # pylint: disable=missing-docstring ret = _wrapper_helper(*args) ret = structure.to_tensor_list(self._output_structure, ret) return [ops.convert_to_tensor(t) for t in ret] resource_tracker = tracking.ResourceTracker() with tracking.resource_tracker_scope(resource_tracker): self._function = wrapper_fn._get_concrete_function_internal() if add_to_graph: self._function.add_to_graph(ops.get_default_graph()) if resource_tracker.resources: _warn_if_collections(transformation_name) outer_graph_seed = ops.get_default_graph().seed if outer_graph_seed and self._function.graph.seed == outer_graph_seed: if self._function.graph._seed_used: warnings.warn( "Seed %s from outer graph might be getting used by function %s, " "if the random op has not been provided any seed. Explicitly set " "the seed in the function if this is not the intended behavior." %(outer_graph_seed, func_name), stacklevel=4) # pylint: enable=protected-access @property def output_structure(self): return self._output_structure @property def output_classes(self): return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access self._output_structure) @property def output_shapes(self): return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access self._output_structure) @property def output_types(self): return nest.map_structure( lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access self._output_structure) @property def function(self): return self._function class _GeneratorDataset(DatasetSource): """A `Dataset` that generates elements by invoking a function.""" def __init__(self, init_args, init_func, next_func, finalize_func): """Constructs a `_GeneratorDataset`. Args: init_args: A nested structure representing the arguments to `init_func`. init_func: A TensorFlow function that will be called on `init_args` each time a C++ iterator over this dataset is constructed. Returns a nested structure representing the "state" of the dataset. next_func: A TensorFlow function that will be called on the result of `init_func` to produce each element, and that raises `OutOfRangeError` to terminate iteration. 
finalize_func: A TensorFlow function that will be called on the result of `init_func` immediately before a C++ iterator over this dataset is destroyed. The return value is ignored. """ self._init_args = init_args self._init_structure = structure.type_spec_from_value(init_args) self._init_func = StructuredFunctionWrapper( init_func, self._transformation_name(), input_structure=self._init_structure) self._next_func = StructuredFunctionWrapper( next_func, self._transformation_name(), input_structure=self._init_func.output_structure) self._finalize_func = StructuredFunctionWrapper( finalize_func, self._transformation_name(), input_structure=self._init_func.output_structure) variant_tensor = gen_dataset_ops.generator_dataset( structure.to_tensor_list(self._init_structure, self._init_args) + self._init_func.function.captured_inputs, self._next_func.function.captured_inputs, self._finalize_func.function.captured_inputs, init_func=self._init_func.function, next_func=self._next_func.function, finalize_func=self._finalize_func.function, **self._flat_structure) super(_GeneratorDataset, self).__init__(variant_tensor) @property def element_spec(self): return self._next_func.output_structure def _transformation_name(self): return "Dataset.from_generator()" class ZipDataset(DatasetV2): """A `Dataset` that zips its inputs together.""" def __init__(self, datasets): """See `Dataset.zip()` for details.""" for ds in nest.flatten(datasets): if not isinstance(ds, DatasetV2): if isinstance(ds, list): message = ("The argument to `Dataset.zip()` must be a nested " "structure of `Dataset` objects. Nested structures do not " "support Python lists; please use a tuple instead.") else: message = ("The argument to `Dataset.zip()` must be a nested " "structure of `Dataset` objects.") raise TypeError(message) self._datasets = datasets self._structure = nest.pack_sequence_as( self._datasets, [ds.element_spec for ds in nest.flatten(self._datasets)]) variant_tensor = gen_dataset_ops.zip_dataset( [ds._variant_tensor for ds in nest.flatten(self._datasets)], **self._flat_structure) super(ZipDataset, self).__init__(variant_tensor) def _inputs(self): return nest.flatten(self._datasets) @property def element_spec(self): return self._structure class ConcatenateDataset(DatasetV2): """A `Dataset` that concatenates its input with given dataset.""" def __init__(self, input_dataset, dataset_to_concatenate): """See `Dataset.concatenate()` for details.""" self._input_dataset = input_dataset self._dataset_to_concatenate = dataset_to_concatenate output_types = get_legacy_output_types(input_dataset) if output_types != get_legacy_output_types(dataset_to_concatenate): raise TypeError( "Two datasets to concatenate have different types %s and %s" % (output_types, get_legacy_output_types(dataset_to_concatenate))) output_classes = get_legacy_output_classes(input_dataset) if output_classes != get_legacy_output_classes(dataset_to_concatenate): raise TypeError( "Two datasets to concatenate have different classes %s and %s" % (output_classes, get_legacy_output_classes(dataset_to_concatenate))) input_shapes = get_legacy_output_shapes(self._input_dataset) output_shapes = nest.pack_sequence_as(input_shapes, [ ts1.most_specific_compatible_shape(ts2) for (ts1, ts2) in zip( nest.flatten(input_shapes), nest.flatten(get_legacy_output_shapes( self._dataset_to_concatenate))) ]) self._structure = structure.convert_legacy_structure( output_types, output_shapes, output_classes) self._input_datasets = [input_dataset, dataset_to_concatenate] # pylint: 
disable=protected-access variant_tensor = gen_dataset_ops.concatenate_dataset( input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor, **self._flat_structure) # pylint: enable=protected-access super(ConcatenateDataset, self).__init__(variant_tensor) def _inputs(self): return self._input_datasets @property def element_spec(self): return self._structure class RepeatDataset(UnaryUnchangedStructureDataset): """A `Dataset` that repeats its input several times.""" def __init__(self, input_dataset, count): """See `Dataset.repeat()` for details.""" self._input_dataset = input_dataset if count is None: self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count") else: self._count = ops.convert_to_tensor( count, dtype=dtypes.int64, name="count") variant_tensor = gen_dataset_ops.repeat_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access count=self._count, **self._flat_structure) super(RepeatDataset, self).__init__(input_dataset, variant_tensor) class RangeDataset(DatasetSource): """A `Dataset` of a step separated range of values.""" def __init__(self, *args): """See `Dataset.range()` for details.""" self._parse_args(*args) self._structure = tensor_spec.TensorSpec([], dtypes.int64) variant_tensor = gen_dataset_ops.range_dataset( start=self._start, stop=self._stop, step=self._step, **self._flat_structure) super(RangeDataset, self).__init__(variant_tensor) def _parse_args(self, *args): """Parse arguments according to the same rules as the `range()` builtin.""" if len(args) == 1: self._start = self._build_tensor(0, "start") self._stop = self._build_tensor(args[0], "stop") self._step = self._build_tensor(1, "step") elif len(args) == 2: self._start = self._build_tensor(args[0], "start") self._stop = self._build_tensor(args[1], "stop") self._step = self._build_tensor(1, "step") elif len(args) == 3: self._start = self._build_tensor(args[0], "start") self._stop = self._build_tensor(args[1], "stop") self._step = self._build_tensor(args[2], "step") else: raise ValueError("Invalid arguments to RangeDataset: %s" % str(args)) def _build_tensor(self, int64_value, name): return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name) @property def element_spec(self): return self._structure class _MemoryCacheDeleter(object): """An object which cleans up an anonymous memory cache resource. An alternative to defining a __del__ method on an object. Even if the parent object is part of a reference cycle, the cycle will be collectable. """ def __init__(self, handle, device, deleter): self._deleter = deleter self._handle = handle self._device = device self._eager_mode = context.executing_eagerly() def __del__(self): with ops.device(self._device): # Make sure the resource is deleted in the same mode as it was created in. 
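      # Illustrative lifecycle (a sketch, not executed here): `_MemoryCache`
      # below stores one of these deleters as `_resource_deleter`; when the
      # cache object is garbage-collected, this `__del__` runs and issues
      # `delete_memory_cache` in the same eager/graph mode, and on the same
      # device, where the resource was created.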
if self._eager_mode: with context.eager_mode(): gen_dataset_ops.delete_memory_cache( handle=self._handle, deleter=self._deleter) else: with context.graph_mode(): gen_dataset_ops.delete_memory_cache( handle=self._handle, deleter=self._deleter) class _MemoryCache(object): """Represents a memory cache resource.""" def __init__(self): super(_MemoryCache, self).__init__() self._device = context.context().device_name self._handle, self._deleter = (gen_dataset_ops.anonymous_memory_cache()) self._resource_deleter = _MemoryCacheDeleter( handle=self._handle, device=self._device, deleter=self._deleter) @property def handle(self): return self._handle class CacheDataset(UnaryUnchangedStructureDataset): """A `Dataset` that caches elements of its input.""" def __init__(self, input_dataset, filename): """See `Dataset.cache()` for details.""" self._input_dataset = input_dataset self._filename = ops.convert_to_tensor( filename, dtype=dtypes.string, name="filename") if tf2.enabled() and (context.executing_eagerly() or ops.get_default_graph()._building_function): # pylint: disable=protected-access self._cache = _MemoryCache() variant_tensor = gen_dataset_ops.cache_dataset_v2( input_dataset._variant_tensor, # pylint: disable=protected-access filename=self._filename, cache=self._cache.handle, **self._flat_structure) else: variant_tensor = gen_dataset_ops.cache_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access filename=self._filename, **self._flat_structure) super(CacheDataset, self).__init__(input_dataset, variant_tensor) class _RandomSeedGeneratorDeleter(object): """An object which cleans up an anonymous random seed generator resource. An alternative to defining a __del__ method on an object. Even if the parent object is part of a reference cycle, the cycle will be collectable. """ def __init__(self, handle, device, deleter): self._deleter = deleter self._handle = handle self._device = device self._eager_mode = context.executing_eagerly() def __del__(self): with ops.device(self._device): # Make sure the resource is deleted in the same mode as it was created in. if self._eager_mode: with context.eager_mode(): gen_dataset_ops.delete_random_seed_generator( handle=self._handle, deleter=self._deleter) else: with context.graph_mode(): gen_dataset_ops.delete_random_seed_generator( handle=self._handle, deleter=self._deleter) class _RandomSeedGenerator(object): """Represents a random seed generator resource.""" def __init__(self, seed, seed2): super(_RandomSeedGenerator, self).__init__() self._device = context.context().device_name self._handle, self._deleter = ( gen_dataset_ops.anonymous_random_seed_generator(seed=seed, seed2=seed2)) self._resource_deleter = _RandomSeedGeneratorDeleter( handle=self._handle, device=self._device, deleter=self._deleter) @property def handle(self): return self._handle class ShuffleDataset(UnaryUnchangedStructureDataset): """A `Dataset` that randomly shuffles the elements of its input.""" def __init__(self, input_dataset, buffer_size, seed=None, reshuffle_each_iteration=None): """Randomly shuffles the elements of this dataset. Args: input_dataset: The input dataset. buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements from this dataset from which the new dataset will sample. seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random seed that will be used to create the distribution. See `tf.compat.v1.set_random_seed` for behavior. reshuffle_each_iteration: (Optional.) 
A boolean, which if true indicates that the dataset should be pseudorandomly reshuffled each time it is iterated over. (Defaults to `True`.) Returns: A `Dataset`. Raises: ValueError: if invalid arguments are provided. """ self._input_dataset = input_dataset self._buffer_size = ops.convert_to_tensor( buffer_size, dtype=dtypes.int64, name="buffer_size") self._seed, self._seed2 = random_seed.get_seed(seed) if reshuffle_each_iteration is None: self._reshuffle_each_iteration = True else: self._reshuffle_each_iteration = reshuffle_each_iteration if tf2.enabled() and self._reshuffle_each_iteration and ( context.executing_eagerly() or ops.get_default_graph()._building_function): # pylint: disable=protected-access self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2) variant_tensor = gen_dataset_ops.shuffle_dataset_v2( input_dataset._variant_tensor, # pylint: disable=protected-access buffer_size=self._buffer_size, seed_generator=self._seed_generator.handle, **self._flat_structure) else: variant_tensor = gen_dataset_ops.shuffle_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access buffer_size=self._buffer_size, seed=self._seed, seed2=self._seed2, reshuffle_each_iteration=self._reshuffle_each_iteration, **self._flat_structure) super(ShuffleDataset, self).__init__(input_dataset, variant_tensor) class TakeDataset(UnaryUnchangedStructureDataset): """A `Dataset` containing the first `count` elements from its input.""" def __init__(self, input_dataset, count): """See `Dataset.take()` for details.""" self._input_dataset = input_dataset self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count") variant_tensor = gen_dataset_ops.take_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access count=self._count, **self._flat_structure) super(TakeDataset, self).__init__(input_dataset, variant_tensor) class SkipDataset(UnaryUnchangedStructureDataset): """A `Dataset` skipping the first `count` elements from its input.""" def __init__(self, input_dataset, count): """See `Dataset.skip()` for details.""" self._input_dataset = input_dataset self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count") variant_tensor = gen_dataset_ops.skip_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access count=self._count, **self._flat_structure) super(SkipDataset, self).__init__(input_dataset, variant_tensor) class ShardDataset(UnaryUnchangedStructureDataset): """A `Dataset` for sharding its input.""" def __init__(self, input_dataset, num_shards, index): """See `Dataset.shard()` for details.""" self._input_dataset = input_dataset self._num_shards = ops.convert_to_tensor( num_shards, dtype=dtypes.int64, name="num_shards") self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index") variant_tensor = gen_dataset_ops.shard_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access num_shards=self._num_shards, index=self._index, **self._flat_structure) super(ShardDataset, self).__init__(input_dataset, variant_tensor) class BatchDataset(UnaryDataset): """A `Dataset` that batches contiguous elements from its input.""" def __init__(self, input_dataset, batch_size, drop_remainder): """See `Dataset.batch()` for details.""" self._input_dataset = input_dataset self._batch_size = ops.convert_to_tensor( batch_size, dtype=dtypes.int64, name="batch_size") self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") constant_drop_remainder = 
tensor_util.constant_value(self._drop_remainder) # pylint: disable=protected-access if constant_drop_remainder: # NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically) # or `False` (explicitly retaining the remainder). # pylint: disable=g-long-lambda self._structure = nest.map_structure( lambda component_spec: component_spec._batch( tensor_util.constant_value(self._batch_size)), input_dataset.element_spec) else: self._structure = nest.map_structure( lambda component_spec: component_spec._batch(None), input_dataset.element_spec) variant_tensor = gen_dataset_ops.batch_dataset_v2( input_dataset._variant_tensor, batch_size=self._batch_size, drop_remainder=self._drop_remainder, **self._flat_structure) super(BatchDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._structure class _VariantTracker(tracking.CapturableResource): """Allows export of functions capturing a Dataset in SavedModels. When saving a SavedModel, `tf.saved_model.save` traverses the object graph. Since Datasets reference _VariantTracker objects, that traversal will find a _VariantTracker for each Dataset and so know how to save and restore functions which reference the Dataset's variant Tensor. """ def __init__(self, variant_tensor, resource_creator): """Record that `variant_tensor` is associated with `resource_creator`. Args: variant_tensor: The variant-dtype Tensor associated with the Dataset. This Tensor will be a captured input to functions which use the Dataset, and is used by saving code to identify the corresponding _VariantTracker. resource_creator: A zero-argument function which creates a new variant-dtype Tensor. This function will be included in SavedModels and run to re-create the Dataset's variant Tensor on restore. """ super(_VariantTracker, self).__init__(device="CPU") self._resource_handle = variant_tensor self._create_resource = resource_creator def _is_padded_shape_compatible_with(padded_shape, input_component_shape): """Returns `True` if `input_component_shape` can be padded to `padded_shape`. Args: padded_shape: A `tf.TensorShape`. input_component_shape: A `tf.TensorShape`. Returns: `True` if `input_component_shape` can be padded to `padded_shape`, otherwise `False`. """ if padded_shape.dims is None or input_component_shape.dims is None: return True if len(padded_shape.dims) != len(input_component_shape.dims): return False for padded_dim, input_dim in zip( padded_shape.dims, input_component_shape.dims): if (padded_dim.value is not None and input_dim.value is not None and padded_dim.value < input_dim.value): return False return True def _padded_shape_to_tensor(padded_shape, input_component_shape): """Converts `padded_shape` to a `tf.Tensor` representing that shape. Args: padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python sequence, or a 1-D `tf.Tensor` of `tf.int64` elements. input_component_shape: A `tf.TensorShape`, with which `padded_shape` must be compatible. Returns: A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`. Raises: ValueError: If `padded_shape` is not a shape or not compatible with `input_component_shape`. TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor. """ try: # Try to convert the `padded_shape` to a `tf.TensorShape` padded_shape_as_shape = tensor_shape.as_shape(padded_shape) # We will return the "canonical" tensor representation, which uses # `-1` in place of `None`. 
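    # For example (illustrative): TensorShape([None, 3]) becomes the tensor
    # [-1, 3] with dtype tf.int64.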
ret = ops.convert_to_tensor( [dim if dim is not None else -1 for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64) except (TypeError, ValueError): # The argument was not trivially convertible to a # `tf.TensorShape`, so fall back on the conversion to tensor # machinery. ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64) if ret.shape.dims is not None and len(ret.shape.dims) != 1: raise ValueError( "Padded shape %s must be a 1-D tensor of tf.int64 values, but its " "shape was %s." % (padded_shape, ret.shape)) if ret.dtype != dtypes.int64: raise TypeError( "Padded shape %s must be a 1-D tensor of tf.int64 values, but its " "element type was %s." % (padded_shape, ret.dtype.name)) padded_shape_as_shape = tensor_util.constant_value_as_shape(ret) if not _is_padded_shape_compatible_with(padded_shape_as_shape, input_component_shape): raise ValueError("The padded shape %s is not compatible with the " "corresponding input component shape %s." % (padded_shape_as_shape, input_component_shape)) return ret def _padding_value_to_tensor(value, output_type): """Converts the padding value to a tensor. Args: value: The padding value. output_type: Its expected dtype. Returns: A scalar `Tensor`. Raises: ValueError: if the padding value is not a scalar. TypeError: if the padding value's type does not match `output_type`. """ value = ops.convert_to_tensor(value, name="padding_value") if not value.shape.is_compatible_with(tensor_shape.TensorShape([])): raise ValueError("Padding value should be a scalar, but is not: %s" % value) if value.dtype != output_type: raise TypeError("Padding value tensor (%s) does not match output type: %s" % (value, output_type)) return value def _default_padding(input_dataset): """Returns default padding tensors in a structure matching `input_dataset`.""" def make_zero(t): if t.base_dtype == dtypes.string: return "" elif t.base_dtype == dtypes.variant: error_msg = ("Unable to create padding for field of type 'variant' " "because t.base_type == dtypes.variant == " "{}.".format( t.base_dtype)) raise TypeError(error_msg) else: return np.zeros_like(t.as_numpy_dtype()) return nest.map_structure( make_zero, get_legacy_output_types(input_dataset)) class PaddedBatchDataset(UnaryDataset): """A `Dataset` that batches and pads contiguous elements from its input.""" def __init__(self, input_dataset, batch_size, padded_shapes, padding_values, drop_remainder): """See `Dataset.batch()` for details.""" self._input_dataset = input_dataset if sparse.any_sparse(get_legacy_output_classes(input_dataset)): # TODO(b/63669786): support batching of sparse tensors raise TypeError( "Batching of padded sparse tensors is not currently supported") self._input_dataset = input_dataset self._batch_size = ops.convert_to_tensor( batch_size, dtype=dtypes.int64, name="batch_size") padding_values = ( padding_values if padding_values is not None else _default_padding(input_dataset)) input_shapes = get_legacy_output_shapes(input_dataset) flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes) flat_padded_shapes_as_tensors = [] for input_component_shape, padded_shape in zip( nest.flatten(input_shapes), flat_padded_shapes): flat_padded_shapes_as_tensors.append( _padded_shape_to_tensor(padded_shape, input_component_shape)) self._padded_shapes = nest.pack_sequence_as(input_shapes, flat_padded_shapes_as_tensors) self._padding_values = nest.map_structure_up_to( input_shapes, _padding_value_to_tensor, padding_values, get_legacy_output_types(input_dataset)) self._drop_remainder = 
ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") def _padded_shape_to_batch_shape(s): return tensor_shape.TensorShape([ tensor_util.constant_value(self._batch_size) if smart_cond.smart_constant_value(self._drop_remainder) else None ]).concatenate(tensor_util.constant_value_as_shape(s)) output_shapes = nest.map_structure( _padded_shape_to_batch_shape, self._padded_shapes) self._structure = structure.convert_legacy_structure( get_legacy_output_types(self._input_dataset), output_shapes, get_legacy_output_classes(self._input_dataset)) # pylint: disable=protected-access # TODO(jsimsa): Switch to using v2 only any time after 6/30/2018. if smart_cond.smart_constant_value(self._drop_remainder) is False: variant_tensor = gen_dataset_ops.padded_batch_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access batch_size=self._batch_size, padded_shapes=[ ops.convert_to_tensor(s, dtype=dtypes.int64) for s in nest.flatten(self._padded_shapes) ], padding_values=nest.flatten(self._padding_values), output_shapes=structure.get_flat_tensor_shapes(self._structure)) else: variant_tensor = gen_dataset_ops.padded_batch_dataset_v2( input_dataset._variant_tensor, # pylint: disable=protected-access batch_size=self._batch_size, padded_shapes=[ ops.convert_to_tensor(s, dtype=dtypes.int64) for s in nest.flatten(self._padded_shapes) ], padding_values=nest.flatten(self._padding_values), drop_remainder=self._drop_remainder, output_shapes=structure.get_flat_tensor_shapes(self._structure)) super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._structure def _should_unpack_args(args): """Returns `True` if `args` should be `*args` when passed to a callable.""" return type(args) is tuple # pylint: disable=unidiomatic-typecheck class MapDataset(UnaryDataset): """A `Dataset` that maps a function over elements in its input.""" def __init__(self, input_dataset, map_func, use_inter_op_parallelism=True, preserve_cardinality=False, use_legacy_function=False): """See `Dataset.map()` for details.""" self._input_dataset = input_dataset self._use_inter_op_parallelism = use_inter_op_parallelism self._preserve_cardinality = preserve_cardinality self._map_func = StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset, use_legacy_function=use_legacy_function) variant_tensor = gen_dataset_ops.map_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, f=self._map_func.function, use_inter_op_parallelism=self._use_inter_op_parallelism, preserve_cardinality=self._preserve_cardinality, **self._flat_structure) super(MapDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._map_func.output_structure def _transformation_name(self): return "Dataset.map()" class ParallelMapDataset(UnaryDataset): """A `Dataset` that maps a function over elements in its input in parallel.""" def __init__(self, input_dataset, map_func, num_parallel_calls, use_inter_op_parallelism=True, preserve_cardinality=False, use_legacy_function=False): """See `Dataset.map()` for details.""" self._input_dataset = input_dataset self._use_inter_op_parallelism = use_inter_op_parallelism self._map_func = StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset, use_legacy_function=use_legacy_function) self._num_parallel_calls = ops.convert_to_tensor( 
num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls") self._preserve_cardinality = preserve_cardinality variant_tensor = gen_dataset_ops.parallel_map_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, f=self._map_func.function, num_parallel_calls=self._num_parallel_calls, use_inter_op_parallelism=self._use_inter_op_parallelism, preserve_cardinality=self._preserve_cardinality, **self._flat_structure) super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._map_func.output_structure def _transformation_name(self): return "Dataset.map()" class FlatMapDataset(UnaryDataset): """A `Dataset` that maps a function over its input and flattens the result.""" def __init__(self, input_dataset, map_func): """See `Dataset.flat_map()` for details.""" self._input_dataset = input_dataset self._map_func = StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, DatasetSpec): raise TypeError( "`map_func` must return a `Dataset` object. Got {}".format( type(self._map_func.output_structure))) self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access variant_tensor = gen_dataset_ops.flat_map_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, f=self._map_func.function, **self._flat_structure) super(FlatMapDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._structure def _transformation_name(self): return "Dataset.flat_map()" class InterleaveDataset(UnaryDataset): """A `Dataset` that interleaves the result of transformed inputs.""" def __init__(self, input_dataset, map_func, cycle_length, block_length): """See `Dataset.interleave()` for details.""" self._input_dataset = input_dataset self._map_func = StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, DatasetSpec): raise TypeError( "`map_func` must return a `Dataset` object. 
Got {}".format( type(self._map_func.output_structure))) self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") variant_tensor = gen_dataset_ops.interleave_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, # pylint: disable=protected-access self._cycle_length, self._block_length, f=self._map_func.function, **self._flat_structure) super(InterleaveDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._structure def _transformation_name(self): return "Dataset.interleave()" class ParallelInterleaveDataset(UnaryDataset): """A `Dataset` that maps a function over its input and interleaves the result.""" def __init__(self, input_dataset, map_func, cycle_length, block_length, num_parallel_calls): """See `Dataset.interleave()` for details.""" self._input_dataset = input_dataset self._map_func = StructuredFunctionWrapper( map_func, self._transformation_name(), dataset=input_dataset) if not isinstance(self._map_func.output_structure, DatasetSpec): raise TypeError( "`map_func` must return a `Dataset` object. Got {}".format( type(self._map_func.output_structure))) self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access self._cycle_length = ops.convert_to_tensor( cycle_length, dtype=dtypes.int64, name="cycle_length") self._block_length = ops.convert_to_tensor( block_length, dtype=dtypes.int64, name="block_length") self._num_parallel_calls = ops.convert_to_tensor( num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls") variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2( input_dataset._variant_tensor, # pylint: disable=protected-access self._map_func.function.captured_inputs, # pylint: disable=protected-access self._cycle_length, self._block_length, self._num_parallel_calls, f=self._map_func.function, **self._flat_structure) super(ParallelInterleaveDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._map_func] @property def element_spec(self): return self._structure def _transformation_name(self): return "Dataset.interleave()" class FilterDataset(UnaryUnchangedStructureDataset): """A `Dataset` that filters its input according to a predicate function.""" def __init__(self, input_dataset, predicate, use_legacy_function=False): """See `Dataset.filter()` for details.""" self._input_dataset = input_dataset wrapped_func = StructuredFunctionWrapper( predicate, self._transformation_name(), dataset=input_dataset, use_legacy_function=use_legacy_function) if not wrapped_func.output_structure.is_compatible_with( tensor_spec.TensorSpec([], dtypes.bool)): error_msg = ("`predicate` return type must be convertible to a scalar " "boolean tensor. 
Was {}.").format( wrapped_func.output_structure) raise ValueError(error_msg) self._predicate = wrapped_func variant_tensor = gen_dataset_ops.filter_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access other_arguments=self._predicate.function.captured_inputs, predicate=self._predicate.function, **self._flat_structure) super(FilterDataset, self).__init__(input_dataset, variant_tensor) def _functions(self): return [self._predicate] def _transformation_name(self): return "Dataset.filter()" class PrefetchDataset(UnaryUnchangedStructureDataset): """A `Dataset` that asynchronously prefetches its input.""" def __init__(self, input_dataset, buffer_size, slack_period=None): """See `Dataset.prefetch()` for details. Args: input_dataset: The input dataset. buffer_size: See `Dataset.prefetch()` for details. slack_period: (Optional.) An integer. If non-zero, determines the number of GetNext calls before injecting slack into the execution. This may reduce CPU contention at the start of a step. Note that a tensorflow user should not have to set this manually; enable this behavior automatically via `tf.data.Options.experimental_slack` instead. Defaults to None. """ self._input_dataset = input_dataset if buffer_size is None: buffer_size = -1 # This is the sentinel for auto-tuning. self._buffer_size = ops.convert_to_tensor( buffer_size, dtype=dtypes.int64, name="buffer_size") with ops.device(input_dataset._variant_tensor.device): variant_tensor = gen_dataset_ops.prefetch_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access buffer_size=self._buffer_size, slack_period=slack_period, **self._flat_structure) super(PrefetchDataset, self).__init__(input_dataset, variant_tensor) class WindowDataset(UnaryDataset): """A dataset that creates window datasets from the input elements.""" def __init__(self, input_dataset, size, shift, stride, drop_remainder): """See `window_dataset()` for more details.""" self._input_dataset = input_dataset self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size") self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift") self._stride = ops.convert_to_tensor( stride, dtype=dtypes.int64, name="stride") self._drop_remainder = ops.convert_to_tensor( drop_remainder, dtype=dtypes.bool, name="drop_remainder") self._structure = nest.pack_sequence_as( get_legacy_output_classes(input_dataset), [ DatasetSpec( # pylint: disable=g-complex-comprehension structure.convert_legacy_structure( output_type, output_shape, output_class)) for output_class, output_shape, output_type in zip( nest.flatten(get_legacy_output_classes(input_dataset)), nest.flatten(get_legacy_output_shapes(input_dataset)), nest.flatten(get_legacy_output_types(input_dataset))) ]) variant_tensor = gen_dataset_ops.window_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._size, self._shift, self._stride, self._drop_remainder, **self._flat_structure) super(WindowDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._structure class _OptionsDataset(UnaryUnchangedStructureDataset): """An identity `Dataset` that stores options.""" def __init__(self, input_dataset, options): self._input_dataset = input_dataset self._options = input_dataset.options() if self._options: self._options = self._options.merge(options) else: self._options = options variant_tensor = input_dataset._variant_tensor # pylint: disable=protected-access super(_OptionsDataset, self).__init__(input_dataset, variant_tensor) def 
options(self): return self._options class _ModelDataset(UnaryUnchangedStructureDataset): """A `Dataset` that acts as an identity, and models performance.""" def __init__(self, input_dataset, algorithm, cpu_budget): self._input_dataset = input_dataset # TODO(jsimsa): This check is introduced for forward compatibility and can # be removed after 7/24/2019. At that point, all servers are expected to # recognize the `algorithm` attribute. if algorithm != AutotuneAlgorithm.HILL_CLIMB: variant_tensor = gen_dataset_ops.model_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access algorithm=algorithm, cpu_budget=cpu_budget, **self._flat_structure) else: variant_tensor = gen_dataset_ops.model_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access cpu_budget=cpu_budget, **self._flat_structure) super(_ModelDataset, self).__init__(input_dataset, variant_tensor) class _OptimizeDataset(UnaryUnchangedStructureDataset): """A `Dataset` that acts as an identity, and applies optimizations.""" def __init__(self, input_dataset, optimizations, optimization_configs=None): self._input_dataset = input_dataset if optimizations is None: optimizations = [] if optimization_configs is None: optimization_configs = [] self._optimizations = ops.convert_to_tensor( optimizations, dtype=dtypes.string, name="optimizations") variant_tensor = gen_dataset_ops.optimize_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._optimizations, optimization_configs=optimization_configs, **self._flat_structure) super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor) class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset): """A `Dataset` that acts as an identity, and sets a stats aggregator.""" def __init__(self, input_dataset, aggregator, prefix, counter_prefix): self._input_dataset = input_dataset self._stats_aggregator = aggregator self._prefix = prefix self._counter_prefix = counter_prefix variant_tensor = ged_ops.set_stats_aggregator_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._stats_aggregator._resource, # pylint: disable=protected-access self._prefix, self._counter_prefix, **self._flat_structure) super(_SetStatsAggregatorDataset, self).__init__(input_dataset, variant_tensor) class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset): """A `Dataset` that acts as an identity, overriding intra-op parallelism.""" def __init__(self, input_dataset, max_intra_op_parallelism): self._input_dataset = input_dataset self._max_intra_op_parallelism = ops.convert_to_tensor( max_intra_op_parallelism, dtype=dtypes.int64, name="max_intra_op_parallelism") variant_tensor = ged_ops.max_intra_op_parallelism_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._max_intra_op_parallelism, **self._flat_structure) super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset, variant_tensor) class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset): """A `Dataset` that acts as an identity, setting a private threadpool.""" def __init__(self, input_dataset, num_threads): self._input_dataset = input_dataset self._num_threads = ops.convert_to_tensor( num_threads, dtype=dtypes.int64, name="num_threads") variant_tensor = ged_ops.private_thread_pool_dataset( input_dataset._variant_tensor, # pylint: disable=protected-access self._num_threads, **self._flat_structure) super(_PrivateThreadPoolDataset, self).__init__(input_dataset, variant_tensor) class _RestructuredDataset(UnaryDataset): """An internal 
helper for changing the structure and shape of a dataset.""" def __init__(self, dataset, structure): self._input_dataset = dataset self._structure = structure variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access super(_RestructuredDataset, self).__init__(dataset, variant_tensor) @property def element_spec(self): return self._structure class _UnbatchDataset(UnaryDataset): """A dataset that splits the elements of its input into multiple elements.""" def __init__(self, input_dataset): """See `unbatch()` for more details.""" flat_shapes = input_dataset._flat_shapes # pylint: disable=protected-access if any(s.ndims == 0 for s in flat_shapes): raise ValueError("Cannot unbatch an input with scalar components.") known_batch_dim = tensor_shape.Dimension(None) for s in flat_shapes: try: known_batch_dim = known_batch_dim.merge_with(s[0]) except ValueError: raise ValueError("Cannot unbatch an input whose components have " "different batch sizes.") self._input_dataset = input_dataset self._structure = nest.map_structure( lambda component_spec: component_spec._unbatch(), # pylint: disable=protected-access get_structure(input_dataset)) variant_tensor = ged_ops.unbatch_dataset( self._input_dataset._variant_tensor, # pylint: disable=protected-access **self._flat_structure) super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor) @property def element_spec(self): return self._structure
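The `_UnbatchDataset` wrapper above is reached through `tf.data.experimental.unbatch()` in TF 1.15. A minimal usage sketch (illustrative only; the session loop and values are not from this file):

import tensorflow.compat.v1 as tf

# Two elements of shape [2]; unbatching yields four scalar elements.
# Scalar *inputs* would be rejected by _UnbatchDataset above.
dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
unbatched = dataset.apply(tf.data.experimental.unbatch())

next_element = tf.data.make_one_shot_iterator(unbatched).get_next()
with tf.Session() as sess:
  for _ in range(4):
    print(sess.run(next_element))  # 1, then 2, then 3, then 4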
tensorflow-r1.15.5-nv23.03
tensorflow/python/data/ops/dataset_ops.py
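Before leaving dataset_ops.py: a hedged sketch of the public entry point to `PaddedBatchDataset`, `Dataset.padded_batch`. The `None` in `padded_shapes` is the unknown dimension that `_padded_shape_to_tensor` converts to -1, and omitting `padding_values` exercises the `_default_padding` zeros:

import tensorflow.compat.v1 as tf

# Variable-length elements [1], [2, 2], [3, 3, 3].
dataset = tf.data.Dataset.range(1, 4).map(lambda x: tf.fill([x], x))
batched = dataset.padded_batch(3, padded_shapes=[None])

element = tf.data.make_one_shot_iterator(batched).get_next()
with tf.Session() as sess:
  print(sess.run(element))
  # [[1 0 0]
  #  [2 2 0]
  #  [3 3 3]]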
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for listing the devices available in the local process."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow


def list_local_devices(session_config=None):
  """Lists the devices available in the local process.

  Args:
    session_config: a session config proto or None to use the default config.

  Returns:
    A list of `DeviceAttributes` protocol buffers.
  """
  def _convert(pb_str):
    m = device_attributes_pb2.DeviceAttributes()
    m.ParseFromString(pb_str)
    return m

  return [
      _convert(s)
      for s in pywrap_tensorflow.list_devices(session_config=session_config)
  ]
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/device_lib.py
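A short usage sketch for `device_lib.list_local_devices` above; the formatting is illustrative, and `name`, `device_type`, and `memory_limit` are fields of the `DeviceAttributes` proto:

from tensorflow.python.client import device_lib

for dev in device_lib.list_local_devices():
  # e.g. "/device:CPU:0  CPU  268435456 bytes"
  print("%s  %s  %d bytes" % (dev.name, dev.device_type, dev.memory_limit))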
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Timeline visualization for TensorFlow using Chrome Trace Format.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import re # The timeline target is usually imported as part of BUILD target # "platform_test", which includes also includes the "platform" # dependency. This is why the logging import here is okay. from tensorflow.python.platform import tf_logging as logging class AllocationMaximum(collections.namedtuple( 'AllocationMaximum', ('timestamp', 'num_bytes', 'tensors'))): """Stores the maximum allocation for a given allocator within the timelne. Parameters: timestamp: `tensorflow::Env::NowMicros()` when this maximum was reached. num_bytes: the total memory used at this time. tensors: the set of tensors allocated at this time. """ pass class StepStatsAnalysis(collections.namedtuple( 'StepStatsAnalysis', ('chrome_trace', 'allocator_maximums'))): """Stores the step stats analysis output. Parameters: chrome_trace: A dict containing the chrome trace analysis. allocator_maximums: A dict mapping allocator names to AllocationMaximum. """ pass class _ChromeTraceFormatter(object): """A helper class for generating traces in Chrome Trace Format.""" def __init__(self, show_memory=False): """Constructs a new Chrome Trace formatter.""" self._show_memory = show_memory self._events = [] self._metadata = [] def _create_event(self, ph, category, name, pid, tid, timestamp): """Creates a new Chrome Trace event. For details of the file format, see: https://github.com/catapult-project/catapult/blob/master/tracing/README.md Args: ph: The type of event - usually a single character. category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. timestamp: The timestamp of this event as a long integer. Returns: A JSON compatible event object. """ event = {} event['ph'] = ph event['cat'] = category event['name'] = name event['pid'] = pid event['tid'] = tid event['ts'] = timestamp return event def emit_pid(self, name, pid): """Adds a process metadata event to the trace. Args: name: The process name as a string. pid: Identifier of the process as an integer. """ event = {} event['name'] = 'process_name' event['ph'] = 'M' event['pid'] = pid event['args'] = {'name': name} self._metadata.append(event) def emit_tid(self, name, pid, tid): """Adds a thread metadata event to the trace. Args: name: The thread name as a string. pid: Identifier of the process as an integer. tid: Identifier of the thread as an integer. 
""" event = {} event['name'] = 'thread_name' event['ph'] = 'M' event['pid'] = pid event['tid'] = tid event['args'] = {'name': name} self._metadata.append(event) def emit_region(self, timestamp, duration, pid, tid, category, name, args): """Adds a region event to the trace. Args: timestamp: The start timestamp of this region as a long integer. duration: The duration of this region as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. category: The event category as a string. name: The event name as a string. args: A JSON-compatible dictionary of event arguments. """ event = self._create_event('X', category, name, pid, tid, timestamp) event['dur'] = duration event['args'] = args self._events.append(event) def emit_obj_create(self, category, name, timestamp, pid, tid, object_id): """Adds an object creation event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. """ event = self._create_event('N', category, name, pid, tid, timestamp) event['id'] = object_id self._events.append(event) def emit_obj_delete(self, category, name, timestamp, pid, tid, object_id): """Adds an object deletion event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. """ event = self._create_event('D', category, name, pid, tid, timestamp) event['id'] = object_id self._events.append(event) def emit_obj_snapshot(self, category, name, timestamp, pid, tid, object_id, snapshot): """Adds an object snapshot event to the trace. Args: category: The event category as a string. name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. object_id: Identifier of the object as an integer. snapshot: A JSON-compatible representation of the object. """ event = self._create_event('O', category, name, pid, tid, timestamp) event['id'] = object_id event['args'] = {'snapshot': snapshot} self._events.append(event) def emit_flow_start(self, name, timestamp, pid, tid, flow_id): """Adds a flow start event to the trace. When matched with a flow end event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer. """ event = self._create_event('s', 'DataFlow', name, pid, tid, timestamp) event['id'] = flow_id self._events.append(event) def emit_flow_end(self, name, timestamp, pid, tid, flow_id): """Adds a flow end event to the trace. When matched with a flow start event (with the same 'flow_id') this will cause the trace viewer to draw an arrow between the start and end events. 
Args: name: The event name as a string. timestamp: The timestamp of this event as a long integer. pid: Identifier of the process generating this event as an integer. tid: Identifier of the thread generating this event as an integer. flow_id: Identifier of the flow as an integer. """ event = self._create_event('t', 'DataFlow', name, pid, tid, timestamp) event['id'] = flow_id self._events.append(event) def emit_counter(self, category, name, pid, timestamp, counter, value): """Emits a record for a single counter. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counter: Name of the counter as a string. value: Value of the counter as an integer. """ event = self._create_event('C', category, name, pid, 0, timestamp) event['args'] = {counter: value} self._events.append(event) def emit_counters(self, category, name, pid, timestamp, counters): """Emits a counter record for the dictionary 'counters'. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counters: Dictionary of counter values. """ event = self._create_event('C', category, name, pid, 0, timestamp) event['args'] = counters.copy() self._events.append(event) def format_to_string(self, pretty=False): """Formats the chrome trace to a string. Args: pretty: (Optional.) If True, produce human-readable JSON output. Returns: A JSON-formatted string in Chrome Trace format. """ trace = {} trace['traceEvents'] = self._metadata + self._events if pretty: return json.dumps(trace, indent=4, separators=(',', ': ')) else: return json.dumps(trace, separators=(',', ':')) class _TensorTracker(object): """An internal class to track the lifetime of a Tensor.""" def __init__(self, name, object_id, timestamp, pid, allocator, num_bytes): """Creates an object to track tensor references. This class is not thread safe and is intended only for internal use by the 'Timeline' class in this file. Args: name: The name of the Tensor as a string. object_id: Chrome Trace object identifier assigned for this Tensor. timestamp: The creation timestamp of this event as a long integer. pid: Process identifier of the associated device, as an integer. allocator: Name of the allocator used to create the Tensor. num_bytes: Number of bytes allocated (long integer). Returns: A 'TensorTracker' object. 
""" self._name = name self._pid = pid self._object_id = object_id self._create_time = timestamp self._allocator = allocator self._num_bytes = num_bytes self._ref_times = [] self._unref_times = [] @property def name(self): """Name of this tensor.""" return self._name @property def pid(self): """ID of the process which created this tensor (an integer).""" return self._pid @property def create_time(self): """Timestamp when this tensor was created (long integer).""" return self._create_time @property def object_id(self): """Returns the object identifier of this tensor (integer).""" return self._object_id @property def num_bytes(self): """Size of this tensor in bytes (long integer).""" return self._num_bytes @property def allocator(self): """Name of the allocator used to create this tensor (string).""" return self._allocator @property def last_unref(self): """Last unreference timestamp of this tensor (long integer).""" return max(self._unref_times) def add_ref(self, timestamp): """Adds a reference to this tensor with the specified timestamp. Args: timestamp: Timestamp of object reference as an integer. """ self._ref_times.append(timestamp) def add_unref(self, timestamp): """Adds an unref to this tensor with the specified timestamp. Args: timestamp: Timestamp of object unreference as an integer. """ self._unref_times.append(timestamp) class Timeline(object): """A class for visualizing execution timelines of TensorFlow steps.""" def __init__(self, step_stats, graph=None): """Constructs a new Timeline. A 'Timeline' is used for visualizing the execution of a TensorFlow computation. It shows the timings and concurrency of execution at the granularity of TensorFlow Ops. This class is not thread safe. Args: step_stats: The 'StepStats' proto recording execution times. graph: (Optional) The 'Graph' that was executed. """ self._step_stats = step_stats self._graph = graph self._chrome_trace = _ChromeTraceFormatter() self._next_pid = 0 self._device_pids = {} # device name -> pid for compute activity. self._tensor_pids = {} # device name -> pid for tensors. self._tensors = {} # tensor_name -> TensorTracker self._next_flow_id = 0 self._flow_starts = {} # tensor_name -> (timestamp, pid, tid) self._alloc_times = {} # tensor_name -> ( time, allocator, size ) self._allocator_maximums = {} # allocator name => maximum bytes long def _alloc_pid(self): """Allocate a process Id.""" pid = self._next_pid self._next_pid += 1 return pid def _alloc_flow_id(self): """Allocate a flow Id.""" flow_id = self._next_flow_id self._next_flow_id += 1 return flow_id def _parse_op_label(self, label): """Parses the fields in a node timeline label.""" # Expects labels of the form: name = op(arg, arg, ...). match = re.match(r'(.*) = (.*)\((.*)\)', label) if match is None: return 'unknown', 'unknown', [] nn, op, inputs = match.groups() if not inputs: inputs = [] else: inputs = inputs.split(', ') return nn, op, inputs def _assign_lanes(self): """Assigns non-overlapping lanes for the activities on each device.""" for device_stats in self._step_stats.dev_stats: # TODO(pbar): Genuine thread IDs in NodeExecStats might be helpful. lanes = [0] for ns in device_stats.node_stats: l = -1 for (i, lts) in enumerate(lanes): if ns.all_start_micros > lts: l = i lanes[l] = ns.all_start_micros + ns.all_end_rel_micros break if l < 0: l = len(lanes) lanes.append(ns.all_start_micros + ns.all_end_rel_micros) ns.thread_id = l def _emit_op(self, nodestats, pid, is_gputrace): """Generates a Chrome Trace event to show Op execution. 
Args: nodestats: The 'NodeExecStats' proto recording op execution. pid: The pid assigned for the device where this op ran. is_gputrace: If True then this op came from the GPUTracer. """ node_name = nodestats.node_name start = nodestats.all_start_micros duration = nodestats.all_end_rel_micros tid = nodestats.thread_id inputs = [] if is_gputrace: # Node names should always have the form 'name:op'. fields = node_name.split(':') + ['unknown'] node_name, op = fields[:2] elif node_name == 'RecvTensor': # RPC tracing does not use the standard timeline_label format. op = 'RecvTensor' else: _, op, inputs = self._parse_op_label(nodestats.timeline_label) args = {'name': node_name, 'op': op} for i, iname in enumerate(inputs): args['input%d' % i] = iname self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args) def _emit_tensor_snapshot(self, tensor, timestamp, pid, tid, value): """Generate Chrome Trace snapshot event for a computed Tensor. Args: tensor: A 'TensorTracker' object. timestamp: The timestamp of this snapshot as a long integer. pid: The pid assigned for showing the device where this op ran. tid: The tid of the thread computing the tensor snapshot. value: A JSON-compliant snapshot of the object. """ desc = str(value.tensor_description).replace('"', '') snapshot = {'tensor_description': desc} self._chrome_trace.emit_obj_snapshot('Tensor', tensor.name, timestamp, pid, tid, tensor.object_id, snapshot) def _produce_tensor(self, name, timestamp, tensors_pid, allocator, num_bytes): object_id = len(self._tensors) tensor = _TensorTracker(name, object_id, timestamp, tensors_pid, allocator, num_bytes) self._tensors[name] = tensor return tensor def _is_gputrace_device(self, device_name): """Returns true if this device is part of the GPUTracer logging.""" return '/stream:' in device_name or '/memcpy' in device_name def _allocate_pids(self): """Allocate fake process ids for each device in the StepStats.""" self._allocators_pid = self._alloc_pid() self._chrome_trace.emit_pid('Allocators', self._allocators_pid) # Add processes in the Chrome trace to show compute and data activity. 
for dev_stats in self._step_stats.dev_stats: device_pid = self._alloc_pid() self._device_pids[dev_stats.device] = device_pid tensors_pid = self._alloc_pid() self._tensor_pids[dev_stats.device] = tensors_pid self._chrome_trace.emit_pid(dev_stats.device + ' Compute', device_pid) self._chrome_trace.emit_pid(dev_stats.device + ' Tensors', tensors_pid) def _analyze_tensors(self, show_memory): """Analyze tensor references to track dataflow.""" for dev_stats in self._step_stats.dev_stats: device_pid = self._device_pids[dev_stats.device] tensors_pid = self._tensor_pids[dev_stats.device] for node_stats in dev_stats.node_stats: tid = node_stats.thread_id node_name = node_stats.node_name start_time = node_stats.all_start_micros end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros for index, output in enumerate(node_stats.output): if index: output_name = '%s:%d' % (node_name, index) else: output_name = node_name allocation = output.tensor_description.allocation_description num_bytes = allocation.requested_bytes allocator_name = allocation.allocator_name tensor = self._produce_tensor(output_name, start_time, tensors_pid, allocator_name, num_bytes) tensor.add_ref(start_time) tensor.add_unref(end_time) self._flow_starts[output_name] = (end_time, device_pid, tid) if show_memory: self._chrome_trace.emit_obj_create('Tensor', output_name, start_time, tensors_pid, tid, tensor.object_id) self._emit_tensor_snapshot(tensor, end_time - 1, tensors_pid, tid, output) def _show_compute(self, show_dataflow): """Visualize the computation activity.""" for dev_stats in self._step_stats.dev_stats: device_name = dev_stats.device device_pid = self._device_pids[device_name] is_gputrace = self._is_gputrace_device(device_name) for node_stats in dev_stats.node_stats: tid = node_stats.thread_id start_time = node_stats.all_start_micros end_time = node_stats.all_start_micros + node_stats.all_end_rel_micros self._emit_op(node_stats, device_pid, is_gputrace) if is_gputrace or node_stats.node_name == 'RecvTensor': continue _, _, inputs = self._parse_op_label(node_stats.timeline_label) for input_name in inputs: if input_name not in self._tensors: # This can happen when partitioning has inserted a Send/Recv. # We remove the numeric suffix so that the dataflow appears to # come from the original node. Ideally, the StepStats would # contain logging for the Send and Recv nodes. index = input_name.rfind('/_') if index > 0: input_name = input_name[:index] if input_name in self._tensors: tensor = self._tensors[input_name] tensor.add_ref(start_time) tensor.add_unref(end_time - 1) if show_dataflow: # We use a different flow ID for every graph edge. create_time, create_pid, create_tid = self._flow_starts[ input_name] # Don't add flows when producer and consumer ops are on the same # pid/tid since the horizontal arrows clutter the visualization. if create_pid != device_pid or create_tid != tid: flow_id = self._alloc_flow_id() self._chrome_trace.emit_flow_start(input_name, create_time, create_pid, create_tid, flow_id) self._chrome_trace.emit_flow_end(input_name, start_time, device_pid, tid, flow_id) else: logging.vlog(1, 'Can\'t find tensor %s - removed by CSE?', input_name) def _show_memory_counters(self): """Produce a counter series for each memory allocator.""" # Iterate over all tensor trackers to build a list of allocations and # frees for each allocator. Then sort the lists and emit a cumulative # counter series for each allocator. 
allocations = {} for name in self._tensors: tensor = self._tensors[name] self._chrome_trace.emit_obj_delete('Tensor', name, tensor.last_unref, tensor.pid, 0, tensor.object_id) allocator = tensor.allocator if allocator not in allocations: allocations[allocator] = [] num_bytes = tensor.num_bytes allocations[allocator].append((tensor.create_time, num_bytes, name)) allocations[allocator].append((tensor.last_unref, -num_bytes, name)) alloc_maxes = {} # Generate a counter series showing total allocations for each allocator. for allocator in allocations: alloc_list = allocations[allocator] alloc_list.sort() total_bytes = 0 alloc_tensor_set = set() alloc_maxes[allocator] = AllocationMaximum( timestamp=0, num_bytes=0, tensors=set()) for time, num_bytes, name in sorted( alloc_list, key=lambda allocation: allocation[0]): total_bytes += num_bytes if num_bytes < 0: alloc_tensor_set.discard(name) else: alloc_tensor_set.add(name) if total_bytes > alloc_maxes[allocator].num_bytes: alloc_maxes[allocator] = AllocationMaximum( timestamp=time, num_bytes=total_bytes, tensors=copy.deepcopy(alloc_tensor_set)) self._chrome_trace.emit_counter('Memory', allocator, self._allocators_pid, time, allocator, total_bytes) self._allocator_maximums = alloc_maxes def analyze_step_stats(self, show_dataflow=True, show_memory=True): self._allocate_pids() self._assign_lanes() self._analyze_tensors(show_memory) self._show_compute(show_dataflow) if show_memory: self._show_memory_counters() return StepStatsAnalysis( chrome_trace=self._chrome_trace, allocator_maximums=self._allocator_maximums) def generate_chrome_trace_format(self, show_dataflow=True, show_memory=False): """Produces a trace in Chrome Trace Format. Args: show_dataflow: (Optional.) If True, add flow events to the trace connecting producers and consumers of tensors. show_memory: (Optional.) If True, add object snapshot events to the trace showing the sizes and lifetimes of tensors. Returns: A JSON formatted string in Chrome Trace format. """ step_stats_analysis = self.analyze_step_stats( show_dataflow=show_dataflow, show_memory=show_memory) return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/timeline.py
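The canonical use of the `Timeline` class above, sketched under the assumption of a TF 1.x graph session (the matmul workload and output path are arbitrary):

import tensorflow.compat.v1 as tf
from tensorflow.python.client import timeline

x = tf.random.normal([1024, 1024])
y = tf.matmul(x, x)

run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
with tf.Session() as sess:
  sess.run(y, options=run_options, run_metadata=run_metadata)

# Timeline consumes the StepStats collected during the run; the resulting
# JSON can be loaded in chrome://tracing.
tl = timeline.Timeline(run_metadata.step_stats)
with open("/tmp/timeline.json", "w") as f:
  f.write(tl.generate_chrome_trace_format(show_dataflow=True,
                                          show_memory=False))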
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped events writer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path

from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat


class PywrapeventsWriterTest(test_util.TensorFlowTestCase):

  def testWriteEvents(self):
    file_prefix = os.path.join(self.get_temp_dir(), "events")
    writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(file_prefix))
    filename = compat.as_text(writer.FileName())
    event_written = event_pb2.Event(
        wall_time=123.45,
        step=67,
        summary=summary_pb2.Summary(
            value=[summary_pb2.Summary.Value(tag="foo", simple_value=89.0)]))
    writer.WriteEvent(event_written)
    writer.Flush()
    writer.Close()

    with self.assertRaises(errors.NotFoundError):
      for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
        self.assertTrue(False)

    reader = tf_record.tf_record_iterator(filename)
    event_read = event_pb2.Event()
    event_read.ParseFromString(next(reader))
    self.assertTrue(event_read.HasField("file_version"))
    event_read.ParseFromString(next(reader))  # Second event
    self.assertProtoEquals("""
    wall_time: 123.45
    step: 67
    summary {
      value {
        tag: 'foo'
        simple_value: 89.0
      }
    }
    """, event_read)
    with self.assertRaises(StopIteration):
      next(reader)

  def testWriteEventInvalidType(self):

    class _Invalid(object):

      def __str__(self):
        return "Invalid"

    with self.assertRaisesRegexp(TypeError, "Invalid"):
      pywrap_tensorflow.EventsWriter(b"foo").WriteEvent(_Invalid())


if __name__ == "__main__":
  googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/events_writer_test.py
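Mirroring `testWriteEvents` above, a hedged sketch of reading an events file back with the record iterator; the path is a placeholder, and the first record in a writer-produced file is the `file_version` event:

import tensorflow.compat.v1 as tf
from tensorflow.core.util import event_pb2

for record in tf.io.tf_record_iterator("/tmp/events.out.tfevents.example"):
  event = event_pb2.Event()
  event.ParseFromString(record)
  print(event.wall_time, event.step, event.WhichOneof("what"))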
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session's ClusterSpec Propagation. These tests exercise the ClusterSpec Propagation capabilities of distributed Sessions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.protobuf import cluster_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops # Import resource_variable_ops for the variables-to-tensor implicit conversion. from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.platform import test from tensorflow.python.training import server_lib # NOTE(mrry): Dummy shape registration for ops used in the tests, since they # don't have C++ op registrations on which to attach C++ shape fns. 
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape) class SessionClusterSpecPropagationTest(test_util.TensorFlowTestCase): def testClusterSpecPropagationSimple(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) const = constant_op.constant(17) sess = session.Session(server1.target, config=config) output = self.evaluate(const) self.assertEqual(17, output) def testClusterSpecPropagationWorker2Placement(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g, ops.device('/job:worker/task:1'): with ops.device('/cpu:0'): const = constant_op.constant(17) sess = session.Session(server1.target, config=config, graph=g) run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() output = sess.run(const, options=run_options, run_metadata=run_metadata) self.assertEqual(17, output) self.assertEqual(1, len([ node_stats for dev_stats in run_metadata.step_stats.dev_stats for node_stats in dev_stats.node_stats if '/job:worker/replica:0/task:1/device:CPU:0' == dev_stats.device and 'Const' == node_stats.node_name ])) def testClusterSpecPropagationWorker1Placement(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g, ops.device('/job:worker/task:0'): const = constant_op.constant(17) with session.Session(server1.target, config=config, graph=g): output = self.evaluate(const) self.assertEqual(17, output) def testCanonicalDeviceNames(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g, ops.device( '/job:worker/task:1/device:CPU:0'): const = constant_op.constant(17) sess = session.Session(server1.target, config=config, graph=g) run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() output = sess.run(const, options=run_options, run_metadata=run_metadata) self.assertEqual(17, output) self.assertEqual(1, len([ node_stats for dev_stats in run_metadata.step_stats.dev_stats for node_stats in dev_stats.node_stats if '/job:worker/replica:0/task:1/device:CPU:0' == dev_stats.device and 'Const' == node_stats.node_name ])) def testFullDeviceNames(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 
'renamed_worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g, ops.device( '/job:renamed_worker/replica:0/task:1/device:CPU:0'): const = constant_op.constant(17) sess = session.Session(server1.target, config=config, graph=g) run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() output = sess.run(const, options=run_options, run_metadata=run_metadata) self.assertEqual(17, output) self.assertEqual(1, len([ node_stats for dev_stats in run_metadata.step_stats.dev_stats for node_stats in dev_stats.node_stats if '/job:renamed_worker/replica:0/task:1/device:CPU:0' == dev_stats.device and 'Const' == node_stats.node_name ])) def testMultipleLocalDevices(self): # Note: CPU->CPU transfers have a fast-path in # BaseRemoteRendezvous::SameWorkerRecvDone that means the test doesn't # actually capture the motivating bug unless run on a GPU machine. # # Example error message (before bugfix -- line breaks added because lint): # # W0718 17:14:41.521534 190121 device_mgr.cc:107] Unknown device: # /job:worker/replica:0/task:0/device:CPU:0 all devices: # /job:local/replica:0/task:0/device:GPU:0, # /job:local/replica:0/task:0/device:GPU:0, # /job:local/replica:0/task:0/cpu:1, CPU:0, GPU:0, # /job:local/replica:0/task:0/device:CPU:1, # /job:local/replica:0/task:0/device:CPU:0, CPU:1, # /job:local/replica:0/task:0/cpu:0 server_config = config_pb2.ConfigProto(device_count={'CPU': 2}) server1 = server_lib.Server.create_local_server(config=server_config) server2 = server_lib.Server.create_local_server(config=server_config) cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g: with ops.device('/job:worker/task:1/cpu:1'): input1 = constant_op.constant(17, dtypes.float32) with ops.device('/job:worker/task:0/cpu:1'): input2 = constant_op.constant(3, dtypes.float32) with ops.device('/job:worker/task:1/cpu:0'): sum1 = input1 + input2 if test.is_gpu_available(): device_str = '/job:worker/task:0/device:GPU:0' else: device_str = '/job:worker/task:0/cpu:1' with ops.device(device_str): sum2 = input2 + input1 with ops.device('/job:worker/task:0/cpu:0'): sum3 = sum1 + sum2 with session.Session(server1.target, config=config, graph=g): output = self.evaluate(sum3) self.assertEqual(40, output) def testLegacyDeviceNames(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.Graph().as_default() as g, ops.device('/job:worker/task:1/cpu:0'): const = constant_op.constant(17) sess = session.Session(server1.target, config=config, graph=g) run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() output = sess.run(const, options=run_options, run_metadata=run_metadata) self.assertEqual(17, output) self.assertEqual(1, len([ node_stats for dev_stats in run_metadata.step_stats.dev_stats for node_stats in dev_stats.node_stats if 
'/job:worker/replica:0/task:1/device:CPU:0' == dev_stats.device and 'Const' == node_stats.node_name ])) def testClusterSpecPropagationThreeServers2Graphs(self): """Boots 3 servers, creates 2 sessions, ensures appropriate operations. We create 2 clusterspecs: 1. server2 as the master, server1 as a worker 2. server2 as the master, server3 as a worker We ensure that variables on the workers are independent. """ server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() server3 = server_lib.Server.create_local_server() cluster_def1 = cluster_pb2.ClusterDef() job1 = cluster_def1.job.add() job1.name = 'worker1' job1.tasks[0] = server2.target[len('grpc://'):] job1.tasks[1] = server1.target[len('grpc://'):] cluster_def2 = cluster_pb2.ClusterDef() job2 = cluster_def2.job.add() job2.name = 'worker2' job2.tasks[0] = server2.target[len('grpc://'):] job2.tasks[1] = server3.target[len('grpc://'):] config1 = config_pb2.ConfigProto(cluster_def=cluster_def1) config2 = config_pb2.ConfigProto(cluster_def=cluster_def2) with ops.Graph().as_default() as g1: with ops.device('/job:worker1/task:1'): var1 = variables.Variable(array_ops.zeros([2]), name='var1') update_op1 = state_ops.assign_add( var1, array_ops.ones([2]), name='var1_assign_add') init1 = variables.global_variables_initializer() with ops.Graph().as_default() as g2: with ops.device('/job:worker2/task:1'): var2 = variables.Variable(array_ops.zeros([2]), name='var2') update_op2 = state_ops.assign_add( var2, array_ops.ones([2]), name='var2_assign_add') init2 = variables.global_variables_initializer() sess1 = session.Session(server2.target, graph=g1, config=config1) sess2 = session.Session(server2.target, graph=g2, config=config2) init1.run(session=sess1) init2.run(session=sess2) expected_zeros = np.zeros([2]) expected_ones = np.ones([2]) self.assertAllEqual(expected_zeros, sess1.run(var1)) self.assertAllEqual(expected_zeros, sess2.run(var2)) self.assertAllEqual(expected_ones, sess1.run(update_op1)) self.assertAllEqual(expected_ones, sess1.run(var1)) self.assertAllEqual(expected_zeros, sess2.run(var2)) self.assertAllEqual(expected_ones, sess2.run(update_op2)) self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op1)) self.assertAllEqual(expected_ones, sess2.run(var2)) self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1)) def testClusterSpecPropagationThreeServers(self): """Boots 3 servers, creates 2 sessions, ensures appropriate operations. We create 2 clusterspecs: 1. server2 as the master, server1 as a worker 2. server2 as the master, server3 as a worker We ensure that variables on the workers are independent. 
""" server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() server3 = server_lib.Server.create_local_server() cluster_def1 = cluster_pb2.ClusterDef() job1 = cluster_def1.job.add() job1.name = 'worker' job1.tasks[0] = server2.target[len('grpc://'):] job1.tasks[1] = server1.target[len('grpc://'):] cluster_def2 = cluster_pb2.ClusterDef() job2 = cluster_def2.job.add() job2.name = 'worker' job2.tasks[0] = server2.target[len('grpc://'):] job2.tasks[1] = server3.target[len('grpc://'):] config1 = config_pb2.ConfigProto(cluster_def=cluster_def1) config2 = config_pb2.ConfigProto(cluster_def=cluster_def2) with ops.device('/job:worker/task:1'): var = variables.Variable(array_ops.zeros([2]), name='var') feed = array_ops.placeholder(dtypes.float32, shape=(2)) update_op = var.assign_add(feed) sess1 = session.Session(server2.target, config=config1) sess2 = session.Session(server2.target, config=config2) variables.global_variables_initializer().run(session=sess1) variables.global_variables_initializer().run(session=sess2) expected_zeros = np.zeros([2]) expected_ones = np.ones([2]) self.assertAllEqual(expected_zeros, sess1.run(var)) self.assertAllEqual(expected_zeros, sess2.run(var)) self.assertAllEqual(expected_ones, sess1.run(update_op, feed_dict={feed: expected_ones})) self.assertAllEqual(expected_ones, sess1.run(var)) self.assertAllEqual(expected_zeros, sess2.run(var)) self.assertAllEqual(expected_ones, sess2.run(update_op, feed_dict={feed: expected_ones})) self.assertAllEqual(expected_ones + expected_ones, sess1.run(update_op, feed_dict={feed: expected_ones})) self.assertAllEqual(expected_ones, sess2.run(var)) self.assertAllEqual(expected_ones + expected_ones, sess1.run(var)) def testClusterSpecPropagationThreeServersOneCluster(self): """Boots 3 servers, ensures appropriate communication across workers. Additionally, in this cluster, we ensure the master is not the 0-th worker. Note: this test only uses one session. """ server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() server3 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server3.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] job.tasks[2] = server1.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) # Add ops to the devices in non-linear order. with ops.device('/job:worker/task:1'): feed1 = array_ops.placeholder(dtypes.float32, shape=(2)) const1 = constant_op.constant(2.0) mul1 = const1 * feed1 with ops.device('/job:worker/task:2'): feed2 = array_ops.placeholder(dtypes.float32, shape=(2)) const2 = constant_op.constant(2.0) mul2 = const2 * feed2 with ops.device('/job:worker/task:0'): feed0 = array_ops.placeholder(dtypes.float32, shape=(2)) const0 = constant_op.constant(2.0) mul0 = const0 * feed0 sum_op = mul0 + mul1 + mul2 ones = np.ones([2]) run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() # Run! 
with session.Session(server1.target, config=config) as sess: output = sess.run( sum_op, options=run_options, run_metadata=run_metadata, feed_dict={feed1: ones, feed2: ones, feed0: ones}) self.assertAllEqual(6 * ones, output) self.assertEqual( 3, len([ dev_stats.device for dev_stats in run_metadata.step_stats.dev_stats for node_stats in dev_stats.node_stats if '/job:worker/replica:0/task:' in dev_stats.device and node_stats.node_name.startswith('Const') ]), run_metadata) def testClusterSpecPropagationIsolation(self): """Test that two sessions using ClusterSpec propagation are isolated.""" server = server_lib.Server.create_local_server() init_value = array_ops.placeholder(dtypes.int32, shape=[]) v = variables.Variable(init_value) cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) sess1 = session.Session(server.target, config=config) sess2 = session.Session(server.target, config=config) # Initially, the variable is uninitialized in both sessions. with self.assertRaises(errors.FailedPreconditionError): sess1.run(v) with self.assertRaises(errors.FailedPreconditionError): sess2.run(v) # An update in sess1 should be visible in sess1 only. sess1.run(v.initializer, feed_dict={init_value: 37}) self.assertEqual(37, sess1.run(v)) with self.assertRaises(errors.FailedPreconditionError): sess2.run(v) # An update in sess2 should be visible in sess2 only. sess2.run(v.initializer, feed_dict={init_value: 86}) self.assertEqual(37, sess1.run(v)) self.assertEqual(86, sess2.run(v)) # Closing sess2 has no effect on the state of sess1. sess2.close() self.assertEqual(37, sess1.run(v)) # Subsequent sessions will not see the state of existing sessions. sess3 = session.Session(server.target, config=config) self.assertEqual(37, sess1.run(v)) with self.assertRaises(errors.FailedPreconditionError): sess3.run(v) def testClusterSpecPropagationNonIsolation(self): """Test that two sessions using ClusterSpec propagation shares state. For example, the updated Variable value are visible among all worker sessions registered in the same server. """ server = server_lib.Server.create_local_server() init_value = array_ops.placeholder(dtypes.int32, shape=[]) v = variables.Variable(init_value) cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) config.experimental.share_session_state_in_clusterspec_propagation = True sess1 = session.Session(server.target, config=config) sess2 = session.Session(server.target, config=config) # Initially, the variable is uninitialized in both sessions. with self.assertRaises(errors.FailedPreconditionError): sess1.run(v) with self.assertRaises(errors.FailedPreconditionError): sess2.run(v) # An update in sess1 should be visible in sess2. sess1.run(v.initializer, feed_dict={init_value: 37}) self.assertEqual(37, sess1.run(v)) self.assertEqual(37, sess2.run(v)) # Closing sess2 has no effect on the state of sess1. sess2.close() self.assertEqual(37, sess1.run(v)) # Subsequent sessions should see the state of existing sessions. sess3 = session.Session(server.target, config=config) self.assertEqual(37, sess1.run(v)) self.assertEqual(37, sess3.run(v)) def testClusterSpecPropagationNonIsolation2Graphs(self): """Creates 2 sessions with each own graph, ensures appropriate operations. We ensure that variables on the workers shares state. 
""" server = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) config.experimental.share_session_state_in_clusterspec_propagation = True with ops.Graph().as_default() as g1: var1 = variables.Variable(array_ops.zeros([2]), name='var') update_op1 = state_ops.assign_add( var1, array_ops.ones([2]), name='var1_assign_add') init1 = variables.global_variables_initializer() with ops.Graph().as_default() as g2: var2 = variables.Variable(array_ops.zeros([2]), name='var') update_op2 = state_ops.assign_add( var2, array_ops.ones([2]), name='var2_assign_add') sess1 = session.Session(server.target, graph=g1, config=config) sess2 = session.Session(server.target, graph=g2, config=config) expected_zeros = np.zeros([2]) expected_ones = np.ones([2]) init1.run(session=sess1) self.assertAllEqual(expected_zeros, sess1.run(var1)) self.assertAllEqual(expected_zeros, sess2.run(var2)) self.assertAllEqual(expected_ones, sess1.run(update_op1)) self.assertAllEqual(expected_ones, sess1.run(var1)) self.assertAllEqual(expected_ones, sess2.run(var2)) self.assertAllEqual(expected_ones + expected_ones, sess2.run(update_op2)) self.assertAllEqual(expected_ones + expected_ones, sess2.run(var2)) self.assertAllEqual(expected_ones + expected_ones, sess1.run(var1)) def testClusterSpecPropagationPartialRun(self): """Test successful partial run with ClusterSpec propagation.""" server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with ops.device('/job:worker/task:0'): a = array_ops.placeholder(dtypes.float32, shape=[]) with ops.device('/job:worker/task:1'): b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) with ops.device('/job:worker/task:0'): r2 = math_ops.multiply(r1, c) with session.Session(server1.target, config=config) as sess: h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) res = sess.partial_run(h, r2, feed_dict={c: 3}) self.assertEqual(9, res) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session_clusterspec_prop_test.py
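# A minimal sketch of the ClusterSpec propagation pattern exercised by the
# tests above (an illustrative fragment, not part of the dump, assuming the
# same internal modules the test file imports): the client names the
# in-process server's task in a ClusterDef and passes it through ConfigProto,
# so devices like '/job:worker/task:0' resolve against that cluster.
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.training import server_lib

server = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
job = cluster_def.job.add()
job.name = 'worker'
job.tasks[0] = server.target[len('grpc://'):]  # strip the grpc:// scheme
config = config_pb2.ConfigProto(cluster_def=cluster_def)

with ops.device('/job:worker/task:0'):
  x = array_ops.placeholder(dtypes.float32, shape=[])
  y = x * 2.0

with session.Session(server.target, config=config) as sess:
  print(sess.run(y, feed_dict={x: 3.0}))  # prints 6.0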
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the SWIG-wrapped device lib.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import device_lib from tensorflow.python.framework import test_util from tensorflow.python.platform import googletest from tensorflow.python.platform import test class DeviceLibTest(test_util.TensorFlowTestCase): def testListLocalDevices(self): devices = device_lib.list_local_devices() self.assertGreater(len(devices), 0) self.assertEqual(devices[0].device_type, "CPU") devices = device_lib.list_local_devices(config_pb2.ConfigProto()) self.assertGreater(len(devices), 0) self.assertEqual(devices[0].device_type, "CPU") # GPU test if test.is_gpu_available(): self.assertGreater(len(devices), 1) self.assertTrue("GPU" in [d.device_type for d in devices] or "SYCL" in [d.device_type for d in devices]) if __name__ == "__main__": googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/device_lib_test.py
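# A short usage sketch for device_lib, mirroring the test above: enumerate
# the local devices and filter accelerators by device_type.
from tensorflow.python.client import device_lib

devices = device_lib.list_local_devices()
for d in devices:
  print(d.name, d.device_type, d.memory_limit)
accelerators = [d.name for d in devices if d.device_type in ('GPU', 'SYCL')]
print('%d device(s), %d accelerator(s)' % (len(devices), len(accelerators)))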
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session's partial run APIs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.client import session from tensorflow.python.framework import common_shapes from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import googletest from tensorflow.python.training import server_lib # NOTE(mrry): Dummy shape registration for ops used in the tests, since they # don't have C++ op registrations on which to attach C++ shape fns. ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape) class PartialRunTest(test_util.TensorFlowTestCase): def RunTestPartialRun(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 17 res = sess.partial_run(h, r2, feed_dict={c: temp}) self.assertEqual(153, res) # Call again on the same graph. 
h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h2, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 18 res = sess.partial_run(h2, r2, feed_dict={c: temp}) self.assertEqual(162, res) def RunTestPartialRunIncomplete(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) def RunTestConcurrentPartialRun(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(r1, c) h1 = sess.partial_run_setup([r1], [a, b, c]) h2 = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h1, r1, feed_dict={a: 1, b: 2}) self.assertEqual(3, res) temp = res * 19 res = sess.partial_run(h2, r1, feed_dict={a: temp, b: 9}) self.assertEqual(66, res) res = sess.partial_run(h2, r2, feed_dict={c: 7}) self.assertEqual(462, res) def RunTestManyPartialRun(self, sess): steps = 200 inputs = [] outputs = [] a = constant_op.constant(2.0, dtypes.float32) for i in xrange(steps): inputs.append(array_ops.placeholder(dtypes.float32, shape=[])) a = math_ops.multiply(a, inputs[i]) outputs.append(a) h = sess.partial_run_setup(outputs, inputs) for i in xrange(steps): res = sess.partial_run(h, outputs[i], feed_dict={inputs[i]: 1.0}) self.assertEqual(2.0, res) feed_dict = {} for i in xrange(steps): feed_dict[inputs[i]] = 1.0 res = sess.run(outputs, feed_dict) self.assertEqual(steps, len(res)) self.assertEqual(2.0, res[-1]) def RunTestRunAndPartialRun(self, sess): a = constant_op.constant(2.0, dtypes.float32) b = a * 2 c = b * 3 r1 = self.evaluate([b, c]) h = sess.partial_run_setup([b, c], []) r2 = sess.partial_run(h, [b, c]) self.assertEqual(r1, r2) def RunTestPartialRunMissingPlaceholderFeedException(self, sess): x = array_ops.placeholder(dtypes.float32, shape=()) fetches = [x * 2, x * 3] handle = sess.partial_run_setup(fetches=fetches, feeds=[]) with self.assertRaisesRegexp(errors.InvalidArgumentError, 'You must feed a value for placeholder'): sess.partial_run(handle, fetches[0]) def RunTestPartialRunUnspecifiedFeed(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) h = sess.partial_run_setup([r1], [a, b]) with self.assertRaisesRegexp(errors.InvalidArgumentError, 'was not specified in partial_run_setup.$'): sess.partial_run(h, r1, feed_dict={a: 1, b: 2, c: 3}) def RunTestPartialRunUnspecifiedFetch(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(a, c) h = sess.partial_run_setup([r1], [a, b, c]) with self.assertRaisesRegexp(errors.InvalidArgumentError, 'was not specified in partial_run_setup.$'): sess.partial_run(h, r2, feed_dict={a: 1, c: 3}) def RunTestPartialRunAlreadyFed(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(a, c) h = sess.partial_run_setup([r1, r2], [a, b, 
c]) sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) with self.assertRaisesRegexp(errors.InvalidArgumentError, 'has already been fed.$'): sess.partial_run(h, r2, feed_dict={a: 1, c: 3}) def RunTestPartialRunAlreadyFetched(self, sess): a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(a, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) with self.assertRaisesRegexp(errors.InvalidArgumentError, 'has already been fetched.$'): sess.partial_run(h, r1, feed_dict={c: 3}) def RunTestPartialRunEmptyFetches(self, sess): a = array_ops.placeholder(dtypes.float32) b = a * 2.0 h = sess.partial_run_setup(fetches=[b], feeds=[a]) sess.partial_run(h, [], {a: 3.0}) r = sess.partial_run(h, [b], {}) self.assertEqual([6.0], r) @test_util.run_deprecated_v1 def testInvalidPartialRunSetup(self): sess = session.Session() x = array_ops.placeholder(dtypes.float32, shape=[]) with self.assertRaisesRegexp( errors.InvalidArgumentError, 'specify at least one target to fetch or execute.'): sess.partial_run_setup(fetches=[], feeds=[x]) @test_util.run_deprecated_v1 def testPartialRunSetupNoFeedsPassed(self): sess = session.Session() r1 = constant_op.constant([6.0]) h = sess.partial_run_setup([r1]) result1 = sess.partial_run(h, r1) self.assertEqual([6.0], result1) @test_util.run_deprecated_v1 def testPartialRunDirect(self): self.RunTestPartialRun(session.Session()) @test_util.run_deprecated_v1 def testPartialRunIncompleteDirect(self): self.RunTestPartialRunIncomplete(session.Session()) @test_util.run_deprecated_v1 def testConcurrentPartialRunDirect(self): self.RunTestConcurrentPartialRun(session.Session()) @test_util.run_deprecated_v1 def testManyPartialRunDirect(self): self.RunTestManyPartialRun(session.Session()) @test_util.run_deprecated_v1 def testRunAndPartialRunDirect(self): self.RunTestRunAndPartialRun(session.Session()) @test_util.run_deprecated_v1 def testPartialRunMissingPlaceholderFeedExceptionDirect(self): self.RunTestPartialRunMissingPlaceholderFeedException(session.Session()) @test_util.run_deprecated_v1 def testPartialRunUnspecifiedFeedDirect(self): self.RunTestPartialRunUnspecifiedFeed(session.Session()) @test_util.run_deprecated_v1 def testPartialRunUnspecifiedFetchDirect(self): self.RunTestPartialRunUnspecifiedFetch(session.Session()) @test_util.run_deprecated_v1 def testPartialRunAlreadyFedDirect(self): self.RunTestPartialRunAlreadyFed(session.Session()) @test_util.run_deprecated_v1 def testPartialRunAlreadyFetchedDirect(self): self.RunTestPartialRunAlreadyFetched(session.Session()) @test_util.run_deprecated_v1 def testPartialRunEmptyFetchesDirect(self): self.RunTestPartialRunEmptyFetches(session.Session()) @test_util.run_deprecated_v1 def testPartialRunDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRun(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunIncompleteDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunIncomplete(session.Session(server.target)) @test_util.run_deprecated_v1 def testConcurrentPartialRunDist(self): server = server_lib.Server.create_local_server() self.RunTestConcurrentPartialRun(session.Session(server.target)) @test_util.run_deprecated_v1 def testManyPartialRunDist(self): server = server_lib.Server.create_local_server() self.RunTestManyPartialRun(session.Session(server.target)) 
@test_util.run_deprecated_v1 def testRunAndPartialRunDist(self): server = server_lib.Server.create_local_server() self.RunTestRunAndPartialRun(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunMissingPlaceholderFeedExceptionDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunMissingPlaceholderFeedException( session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunUnspecifiedFeedDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunUnspecifiedFeed(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunUnspecifiedFetchDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunUnspecifiedFetch(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunAlreadyFedDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunAlreadyFed(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunAlreadyFetchedDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunAlreadyFetched(session.Session(server.target)) @test_util.run_deprecated_v1 def testPartialRunEmptyFetchesDist(self): server = server_lib.Server.create_local_server() self.RunTestPartialRunEmptyFetches(session.Session(server.target)) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session_partial_run_test.py
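# A minimal sketch of the partial-run protocol the tests above exercise:
# declare every feed and fetch once in partial_run_setup, then supply the
# feeds incrementally across partial_run calls on the same handle.
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops

a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)

sess = session.Session()
h = sess.partial_run_setup([r1, r2], [a, b, c])
first = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})  # 3.0
final = sess.partial_run(h, r2, feed_dict={c: first})    # 9.0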
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Support for launching graphs and executing operations. See the [Client](https://www.tensorflow.org/guide/graphs) guide. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.client.session import InteractiveSession from tensorflow.python.client.session import Session from tensorflow.python.framework import errors from tensorflow.python.framework.errors import OpError from tensorflow.python.framework.ops import get_default_session
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/client_lib.py
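# A basic lifecycle sketch for the client API re-exported above: launch a
# graph in a Session and evaluate tensors either via run() or via eval()
# under as_default(). Illustrative only; it uses the same internal modules
# as the surrounding files.
from tensorflow.python.client.session import Session
from tensorflow.python.framework import constant_op

c = constant_op.constant(41.0) + 1.0
sess = Session()
print(sess.run(c))   # 42.0
with sess.as_default():
  print(c.eval())    # same value, via the default-session mechanism
sess.close()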
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A client interface for TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import re import threading import warnings import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import pywrap_tensorflow as tf_session from tensorflow.python.eager import context from tensorflow.python.eager import monitoring from tensorflow.python.framework import device from tensorflow.python.framework import error_interpolation from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import session_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.experimental import mixed_precision_global_state from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util.tf_export import tf_export from tensorflow.python.util.compat import collections_abc _python_session_create_counter = monitoring.Counter( '/tensorflow/api/python/session_create_counter', 'Counter for number of sessions created in Python.') class SessionInterface(object): """Base class for implementations of TensorFlow client sessions.""" @property def graph(self): """The underlying TensorFlow graph, to be used in building Operations.""" raise NotImplementedError('graph') @property def sess_str(self): """The TensorFlow process to which this session will connect.""" raise NotImplementedError('sess_str') def run(self, fetches, feed_dict=None, options=None, run_metadata=None): """Runs operations in the session. See `BaseSession.run()` for details.""" raise NotImplementedError('run') def partial_run_setup(self, fetches, feeds=None): """Sets up the feeds and fetches for partial runs in the session.""" raise NotImplementedError('partial_run_setup') def partial_run(self, handle, fetches, feed_dict=None): """Continues the execution with additional feeds and fetches.""" raise NotImplementedError('partial_run') def _get_indexed_slices_value_from_fetches(fetched_vals): return ops.IndexedSlicesValue( fetched_vals[0], fetched_vals[1], fetched_vals[2] if len(fetched_vals) == 3 else None) def _get_feeds_for_indexed_slices(feed, feed_val): return list( zip([feed.values, feed.indices] if feed.dense_shape is None else [feed.values, feed.indices, feed.dense_shape], feed_val)) # List of extensions supported to convert run arguments into actual fetches and # feeds. 
# # Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2), # where the function signatures are: # fetch_fn : Type -> (list of Tensors, # lambda: list of fetched np.ndarray -> TypeVal) # feed_fn1 : Type, TypeVal -> list of (Tensor, value) # feed_fn2 : Type -> list of Tensors # # `fetch_fn` describes how to expand fetch into its # component Tensors and how to contract the fetched results back into # a single return value. # # Each feed function describes how to unpack a single fed value and map it to # feeds of one or more tensors and their corresponding values: `feed_fn1` is # used to feed a run, `feed_fn2` to set up a partial run. # # TODO(touts): We could reimplement these as specialized _FeedMapper # implementations after we refactor the feed handling code to use them. # # Eventually, this registration could be opened up to support custom Tensor # expansions. # pylint: disable=g-long-lambda _REGISTERED_EXPANSIONS = [ # SparseTensors are fetched as SparseTensorValues. They can be fed # SparseTensorValues or normal tuples. (sparse_tensor.SparseTensor, lambda fetch: ([ fetch.indices, fetch.values, fetch.dense_shape ], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)), lambda feed, feed_val: list( zip([feed.indices, feed.values, feed.dense_shape], feed_val)), lambda feed: [feed.indices, feed.values, feed.dense_shape]), # IndexedSlices are fetched as IndexedSlicesValues. They can be fed # IndexedSlicesValues or normal tuples. (ops.IndexedSlices, lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None else [fetch.values, fetch.indices, fetch.dense_shape ], _get_indexed_slices_value_from_fetches), _get_feeds_for_indexed_slices, lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else [feed.values, feed.indices, feed.dense_shape]), # The default catches all other types and performs no expansions. (object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]), lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed]) ] # pylint: enable=g-long-lambda def _convert_to_numpy_obj(numpy_dtype, obj): """Explicitly convert obj based on numpy type except for string type.""" return numpy_dtype(obj) if numpy_dtype is not object else str(obj) def register_session_run_conversion_functions( tensor_type, fetch_function, feed_function=None, feed_function_for_partial_run=None): """Register fetch and feed conversion functions for `tf.Session.run()`. This function registers a triple of conversion functions for fetching and/or feeding values of user-defined types in a call to tf.Session.run(). An example ```python class SquaredTensor(object): def __init__(self, tensor): self.sq = tf.square(tensor) #you can define conversion functions as follows: fetch_function = lambda squared_tensor:([squared_tensor.sq], lambda val: val[0]) feed_function = lambda feed, feed_val: [(feed.sq, feed_val)] feed_function_for_partial_run = lambda feed: [feed.sq] #then after invoking this register function, you can use as follows: session.run(squared_tensor1, feed_dict = {squared_tensor2 : some_numpy_array}) ``` Args: tensor_type: The type for which you want to register a conversion function. fetch_function: A callable that takes an object of type `tensor_type` and returns a tuple, where the first element is a list of `tf.Tensor` objects, and the second element is a callable that takes a list of ndarrays and returns an object of some value type that corresponds to `tensor_type`. 
fetch_function describes how to expand fetch into its component Tensors and how to contract the fetched results back into a single return value. feed_function: A callable that takes feed_key and feed_value as input, and returns a list of tuples (feed_tensor, feed_val), feed_key must have type `tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed function describes how to unpack a single fed value and map it to feeds of one or more tensors and their corresponding values. feed_function_for_partial_run: A callable for specifying tensor values to feed when setting up a partial run, which takes a `tensor_type` type object as input, and returns a list of Tensors. Raises: ValueError: If `tensor_type` has already been registered. """ for conversion_function in _REGISTERED_EXPANSIONS: if issubclass(conversion_function[0], tensor_type): raise ValueError('%s has already been registered so ignore it.' % tensor_type) _REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function, feed_function_for_partial_run)) def _is_attrs_instance(obj): """Returns True if the given obj is an instance of attrs-decorated class.""" return getattr(obj.__class__, '__attrs_attrs__', None) is not None def _get_attrs_values(obj): """Returns the list of values from an attrs instance.""" attrs = getattr(obj.__class__, '__attrs_attrs__') return [getattr(obj, a.name) for a in attrs] class _FetchMapper(object): """Definition of the interface provided by fetch mappers. Fetch mappers are utility classes used by the _FetchHandler to handle arbitrary structures for the `fetch` argument to `Session.run()`. The `fetch` argument can be of various shapes: single tensor or op, list of fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The structures can be arbitrarily nested. The low level run() API only wants a list of tensor or op names. The various `_FetchMapper` subclasses below take care of handling the different shapes: uniquifying the fetches, and constructing results with the original shape. """ def unique_fetches(self): """Return the list of unique tensors or ops needed by this fetch mapper. Returns: A list of tensors or ops. """ raise NotImplementedError('Must be implemented by subclasses') def build_results(self, values): """Build results that match the original shape of the fetch. Args: values: List of values returned by run(). The values correspond exactly to the list tensors or ops returned by unique_fetches(). Returns: A struct of the same shape as the original fetch object handled by this fetch mapper. In the returned struct, the original fetches are replaced by their fetched values. """ raise NotImplementedError('Must be implemented by subclasses') @staticmethod def for_fetch(fetch): """Creates fetch mapper that handles the structure of `fetch`. The default graph must be the one from which we want to fetch values when this function is called. Args: fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. Returns: An instance of a subclass of `_FetchMapper` that handles the shape. """ if fetch is None: raise TypeError('Fetch argument %r has invalid type %r' % (fetch, type(fetch))) elif isinstance(fetch, (list, tuple)): # NOTE(touts): This is also the code path for namedtuples. return _ListFetchMapper(fetch) elif isinstance(fetch, collections_abc.Mapping): return _DictFetchMapper(fetch) elif _is_attrs_instance(fetch): return _AttrsFetchMapper(fetch) else: # Look for a handler in the registered expansions. 
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS: if isinstance(fetch, tensor_type): fetches, contraction_fn = fetch_fn(fetch) return _ElementFetchMapper(fetches, contraction_fn) # Did not find anything. raise TypeError('Fetch argument %r has invalid type %r' % (fetch, type(fetch))) class _ElementFetchMapper(_FetchMapper): """Fetch mapper for singleton tensors and ops.""" def __init__(self, fetches, contraction_fn): """Creates an _ElementFetchMapper. This is the fetch mapper used for leaves in the fetch struct. Because of the expansions mechanism, a leaf can actually fetch more than one tensor. Also note that the fetches here can be just strings (tensor or op names) or any other object that the graph knows how to convert to a tensor, such as a Variable. So we have to run each fetch through `as_graph_element()` to get the corresponding tensor or op. Args: fetches: List of objects, as returned by a fetch_fn defined in _REGISTERED_EXPANSIONS. contraction_fn: Callable as returned by a fetch_fn. """ self._unique_fetches = [] for fetch in fetches: try: self._unique_fetches.append(ops.get_default_graph().as_graph_element( fetch, allow_tensor=True, allow_operation=True)) except TypeError as e: raise TypeError('Fetch argument %r has invalid type %r, ' 'must be a string or Tensor. (%s)' % (fetch, type(fetch), str(e))) except ValueError as e: raise ValueError('Fetch argument %r cannot be interpreted as a ' 'Tensor. (%s)' % (fetch, str(e))) except KeyError as e: raise ValueError('Fetch argument %r cannot be interpreted as a ' 'Tensor. (%s)' % (fetch, str(e))) self._contraction_fn = contraction_fn def unique_fetches(self): return self._unique_fetches def build_results(self, values): if not values: # 'Operation' case return None else: return self._contraction_fn(values) def _uniquify_fetches(fetch_mappers): """Uniquifies fetches from a list of fetch_mappers. This is a utility function used by _ListFetchMapper and _DictFetchMapper. It gathers all the unique fetches from a list of mappers and builds a list containing all of them but without duplicates (unique_fetches). It also returns a 2-D list of integers (values_indices) indicating at which index in unique_fetches the fetches of the mappers are located. This list is as follows: values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index Args: fetch_mappers: list of fetch mappers. Returns: A list of fetches. A 2-D list of integers. """ unique_fetches = [] value_indices = [] seen_fetches = object_identity.ObjectIdentityDictionary() for m in fetch_mappers: m_value_indices = [] for f in m.unique_fetches(): j = seen_fetches.get(f) if j is None: j = len(seen_fetches) seen_fetches[f] = j unique_fetches.append(f) m_value_indices.append(j) value_indices.append(m_value_indices) return unique_fetches, value_indices class _ListFetchMapper(_FetchMapper): """Fetch mapper for lists, tuples, and namedtuples.""" def __init__(self, fetches): """Creates a _ListFetchMapper. Args: fetches: List, tuple, or namedtuple of fetches. """ self._fetch_type = type(fetches) self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches] self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) def unique_fetches(self): return self._unique_fetches def build_results(self, values): # Create the list of results for each mapper. results = [] for m, vi in zip(self._mappers, self._value_indices): results.append(m.build_results([values[j] for j in vi])) # Return a value of the original type of the fetches. 
if issubclass(self._fetch_type, list): return results elif self._fetch_type == tuple: return tuple(results) else: # This is the code path for namedtuple. return self._fetch_type(*results) class _DictFetchMapper(_FetchMapper): """Fetch mapper for dicts.""" def __init__(self, fetches): """Creates a _DictFetchMapper. Args: fetches: Dict of fetches. """ self._fetch_type = type(fetches) self._keys = fetches.keys() self._mappers = [ _FetchMapper.for_fetch(fetch) for fetch in fetches.values() ] self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) def unique_fetches(self): return self._unique_fetches def build_results(self, values): results = self._fetch_type() for k, m, vi in zip(self._keys, self._mappers, self._value_indices): results[k] = m.build_results([values[j] for j in vi]) return results class _AttrsFetchMapper(_FetchMapper): """Fetch mapper for attrs decorated classes.""" def __init__(self, fetches): """Creates a _AttrsFetchMapper. Args: fetches: An instance of an attrs decorated class. """ values = _get_attrs_values(fetches) self._fetch_type = type(fetches) self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values] self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) def unique_fetches(self): return self._unique_fetches def build_results(self, values): results = [] for m, vi in zip(self._mappers, self._value_indices): results.append(m.build_results([values[j] for j in vi])) return self._fetch_type(*results) class _FetchHandler(object): """Handler for structured fetches. Given a graph, a user-provided structure for fetches, and a feed dict, this class takes care of generating a list of tensor names to fetch and op names to run for a low level `run()` call. Given the results of the low level run call, this class can also rebuild a result structure matching the user-provided structure for fetches, but containing the corresponding results. """ # TODO(touts): Make this class also take care of destructuring the feed # dict instead of doing it in the callers. def __init__(self, graph, fetches, feeds, feed_handles=None): """Creates a fetch handler. Args: graph: Graph of the fetches. Used to check for fetchability and to convert all fetches to tensors or ops as needed. fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. feeds: A feed dict where keys are Tensors. feed_handles: A dict from feed Tensors to TensorHandle objects used as direct feeds. """ with graph.as_default(): self._fetch_mapper = _FetchMapper.for_fetch(fetches) self._fetches = [] self._targets = [] self._feeds = feeds self._feed_handles = ( feed_handles or object_identity.ObjectIdentityDictionary()) self._ops = [] self._fetch_handles = object_identity.ObjectIdentityDictionary() for fetch in self._fetch_mapper.unique_fetches(): if isinstance(fetch, ops.Operation): self._assert_fetchable(graph, fetch) self._targets.append(fetch) self._ops.append(True) else: self._assert_fetchable(graph, fetch.op) self._fetches.append(fetch) self._ops.append(False) # Remember the fetch if it is for a tensor handle. if (isinstance(fetch, ops.Tensor) and (fetch.op.type == 'GetSessionHandle' or fetch.op.type == 'GetSessionHandleV2')): self._fetch_handles[fetch] = fetch.op.inputs[0].dtype self._final_fetches = [x for x in self._fetches if x not in feeds] def _assert_fetchable(self, graph, op): if not graph.is_fetchable(op): raise errors.InaccessibleTensorError( 'Operation %r has been marked as not fetchable. 
Typically this' ' happens when it is defined in another function or code block.' ' Use return values,explicit Python locals or TensorFlow collections' ' to access it.' % op.name) def fetches(self): """Return the unique names of tensors to fetch. Returns: A list of strings. """ return self._final_fetches def targets(self): """Return the unique names of ops to run. Returns: A list of strings. """ return self._targets def build_results(self, session, tensor_values): """Build results matching the original fetch shape. `tensor_values` must be a list of the same length as the one returned by `fetches()`, and holding the requested fetch values. This method builds a struct with the same shape as the original `fetches` passed to the constructor, in which the fetches are replaced by their fetched value. Args: session: The enclosing session. Used for tensor handles. tensor_values: List of values matching the list returned by fetches(). Returns: A structure of the same shape as the original `fetches` argument but containing tensors or None (for fetched ops). """ full_values = [] assert len(self._final_fetches) == len(tensor_values) i = 0 j = 0 for is_op in self._ops: if is_op: full_values.append(None) else: # If the fetch was in the feeds, use the fed value, otherwise # use the returned value. if self._fetches[i] in self._feed_handles: # A fetch had a corresponding direct TensorHandle feed. Call eval() # to obtain the Tensor value from the TensorHandle. value = self._feed_handles[self._fetches[i]].eval() else: value = self._feeds.get(self._fetches[i]) if value is None: value = tensor_values[j] j += 1 dtype = self._fetch_handles.get(self._fetches[i]) if dtype: full_values.append(session_ops.TensorHandle(value, dtype, session)) else: full_values.append(value) i += 1 assert j == len(tensor_values) return self._fetch_mapper.build_results(full_values) def _name_list(tensor_list): """Utility function for transitioning to the new session API. Args: tensor_list: a list of `Tensor`s. Returns: A list of each `Tensor`s name (as byte arrays). """ return [compat.as_bytes(t.name) for t in tensor_list] class _DeviceAttributes(object): """Struct-like object describing a device's attributes. Each device has 3 key properties: - name: the fully-qualified TensorFlow path to the device. For example: /job:worker/replica:0/task:3/device:CPU:0 - device_type: the type of the device (e.g. CPU, GPU, TPU, etc.) - memory_limit_bytes: the maximum amount of memory available on the device (in bytes). """ def __init__(self, name, device_type, memory_limit_bytes, incarnation): self._name = device.canonical_name(name) self._device_type = device_type self._memory_limit_bytes = memory_limit_bytes self._incarnation = incarnation @property def name(self): return self._name @property def device_type(self): return self._device_type @property def memory_limit_bytes(self): return self._memory_limit_bytes @property def incarnation(self): return self._incarnation def __repr__(self): return '_DeviceAttributes(%s, %s, %d, %d)' % ( self.name, self.device_type, self.memory_limit_bytes, self.incarnation, ) class BaseSession(SessionInterface): """A class for interacting with a TensorFlow computation. The BaseSession enables incremental graph building with inline execution of Operations and evaluation of Tensors. """ def __init__(self, target='', graph=None, config=None): """Constructs a new TensorFlow session. Args: target: (Optional) The TensorFlow execution engine to connect to. graph: (Optional) The graph to be used. 
If this argument is None, the default graph will be used. config: (Optional) ConfigProto proto used to configure the session. If no config is specified, the global default will be used. The global default can be configured via the tf.config APIs. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while creating the TensorFlow session. TypeError: If one of the arguments has the wrong type. """ _python_session_create_counter.get_cell().increase_by(1) if graph is None: self._graph = ops.get_default_graph() else: if not isinstance(graph, ops.Graph): raise TypeError('graph must be a tf.Graph, but got %s' % type(graph)) self._graph = graph self._closed = False if target is not None: try: self._target = compat.as_bytes(target) except TypeError: if isinstance(target, config_pb2.ConfigProto): raise TypeError('target must be a string, but got %s.' ' Did you do "Session(config)" instead of' ' "Session(config=config)"?' % type(target)) raise TypeError('target must be a string, but got %s' % type(target)) else: self._target = None self._delete_lock = threading.Lock() self._dead_handles = [] if config is None: config = context.context().config if not isinstance(config, config_pb2.ConfigProto): raise TypeError('config must be a tf.ConfigProto, but got %s' % type(config)) if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled and config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.OFF): new_config = config_pb2.ConfigProto() new_config.CopyFrom(config) new_config.graph_options.rewrite_options.auto_mixed_precision = ( rewriter_config_pb2.RewriterConfig.ON) config = new_config elif (config.graph_options.rewrite_options.auto_mixed_precision != rewriter_config_pb2.RewriterConfig.ON): mixed_precision_global_state.non_mixed_precision_session_created = True self._config = config self._add_shapes = config.graph_options.infer_shapes self._session = None opts = tf_session.TF_NewSessionOptions(target=self._target, config=config) try: # pylint: disable=protected-access self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts) # pylint: enable=protected-access finally: tf_session.TF_DeleteSessionOptions(opts) def list_devices(self): """Lists available devices in this session. ```python devices = sess.list_devices() for d in devices: print(d.name) ``` Where: Each element in the list has the following properties name: A string with the full name of the device. ex: `/job:worker/replica:0/task:3/device:CPU:0` device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.) memory_limit: The maximum amount of memory available on the device. Note: depending on the device, it is possible the usable memory could be substantially less. Raises: tf.errors.OpError: If it encounters an error (e.g. session is in an invalid state, or network errors occur). Returns: A list of devices in the session. """ raw_device_list = tf_session.TF_SessionListDevices(self._session) device_list = [] size = tf_session.TF_DeviceListCount(raw_device_list) for i in range(size): name = tf_session.TF_DeviceListName(raw_device_list, i) device_type = tf_session.TF_DeviceListType(raw_device_list, i) memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i) incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i) device_list.append( _DeviceAttributes(name, device_type, memory, incarnation)) tf_session.TF_DeleteDeviceList(raw_device_list) return device_list def close(self): """Closes this session. 
Calling this method frees all resources associated with the session. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while closing the TensorFlow session. """ if self._session and not self._closed: self._closed = True tf_session.TF_CloseSession(self._session) def __del__(self): # cleanly ignore all exceptions try: self.close() except Exception: # pylint: disable=broad-except pass if self._session is not None: try: tf_session.TF_DeleteSession(self._session) except (AttributeError, TypeError): # At shutdown, `c_api_util`, `tf_session`, or # `tf_session.TF_DeleteSession` may have been garbage collected, causing # the above method calls to fail. In this case, silently leak since the # program is about to terminate anyway. pass self._session = None @property def graph(self): """The graph that was launched in this session.""" return self._graph @property def graph_def(self): """A serializable version of the underlying TensorFlow graph. Returns: A graph_pb2.GraphDef proto containing nodes for all of the Operations in the underlying TensorFlow graph. """ return self._graph.as_graph_def(add_shapes=self._add_shapes) @property def sess_str(self): return self._target def as_default(self): """Returns a context manager that makes this object the default session. Use with the `with` keyword to specify that calls to `tf.Operation.run` or `tf.Tensor.eval` should be executed in this session. ```python c = tf.constant(..) sess = tf.compat.v1.Session() with sess.as_default(): assert tf.compat.v1.get_default_session() is sess print(c.eval()) ``` To get the current default session, use `tf.compat.v1.get_default_session`. *N.B.* The `as_default` context manager *does not* close the session when you exit the context, and you must close the session explicitly. ```python c = tf.constant(...) sess = tf.compat.v1.Session() with sess.as_default(): print(c.eval()) # ... with sess.as_default(): print(c.eval()) sess.close() ``` Alternatively, you can use `with tf.compat.v1.Session():` to create a session that is automatically closed on exiting the context, including when an uncaught exception is raised. *N.B.* The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. *N.B.* Entering a `with sess.as_default():` block does not affect the current default graph. If you are using multiple graphs, and `sess.graph` is different from the value of `tf.compat.v1.get_default_graph`, you must explicitly enter a `with sess.graph.as_default():` block to make `sess.graph` the default graph. Returns: A context manager using this session as the default session. """ return ops.default_session(self) def run(self, fetches, feed_dict=None, options=None, run_metadata=None): """Runs operations and evaluates tensors in `fetches`. This method runs one "step" of TensorFlow computation, by running the necessary graph fragment to execute every `Operation` and evaluate every `Tensor` in `fetches`, substituting the values in `feed_dict` for the corresponding input values. The `fetches` argument may be a single graph element, or an arbitrarily nested list, tuple, namedtuple, dict, or OrderedDict containing graph elements at its leaves. A graph element can be one of the following types: * A `tf.Operation`. The corresponding fetched value will be `None`. * A `tf.Tensor`. The corresponding fetched value will be a numpy ndarray containing the value of that tensor. 
* A `tf.SparseTensor`. The corresponding fetched value will be a `tf.compat.v1.SparseTensorValue` containing the value of that sparse tensor. * A `get_tensor_handle` op. The corresponding fetched value will be a numpy ndarray containing the handle of that tensor. * A `string` which is the name of a tensor or operation in the graph. The value returned by `run()` has the same shape as the `fetches` argument, where the leaves are replaced by the corresponding values returned by TensorFlow. Example: ```python a = tf.constant([10, 20]) b = tf.constant([1.0, 2.0]) # 'fetches' can be a singleton v = session.run(a) # v is the numpy array [10, 20] # 'fetches' can be a list. v = session.run([a, b]) # v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the # 1-D array [1.0, 2.0] # 'fetches' can be arbitrary lists, tuples, namedtuple, dicts: MyData = collections.namedtuple('MyData', ['a', 'b']) v = session.run({'k1': MyData(a, b), 'k2': [b, a]}) # v is a dict with # v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and # 'b' (the numpy array [1.0, 2.0]) # v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array # [10, 20]. ``` The optional `feed_dict` argument allows the caller to override the value of tensors in the graph. Each key in `feed_dict` can be one of the following types: * If the key is a `tf.Tensor`, the value may be a Python scalar, string, list, or numpy ndarray that can be converted to the same `dtype` as that tensor. Additionally, if the key is a `tf.compat.v1.placeholder`, the shape of the value will be checked for compatibility with the placeholder. * If the key is a `tf.SparseTensor`, the value should be a `tf.compat.v1.SparseTensorValue`. * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value should be a nested tuple with the same structure that maps to their corresponding values as above. Each value in `feed_dict` must be convertible to a numpy array of the dtype of the corresponding key. The optional `options` argument expects a [`RunOptions`] proto. The options allow controlling the behavior of this particular step (e.g. turning tracing on). The optional `run_metadata` argument expects a [`RunMetadata`] proto. When appropriate, the non-Tensor output of this step will be collected there. For example, when users turn on tracing in `options`, the profiled info will be collected into this argument and passed back. Args: fetches: A single graph element, a list of graph elements, or a dictionary whose values are graph elements or lists of graph elements (described above). feed_dict: A dictionary that maps graph elements to values (described above). options: A [`RunOptions`] protocol buffer run_metadata: A [`RunMetadata`] protocol buffer Returns: Either a single value if `fetches` is a single graph element, or a list of values if `fetches` is a list, or a dictionary with the same keys as `fetches` if that is a dictionary (described above). Order in which `fetches` operations are evaluated inside the call is undefined. Raises: RuntimeError: If this `Session` is in an invalid state (e.g. has been closed). TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type. ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a `Tensor` that doesn't exist. 
""" options_ptr = tf_session.TF_NewBufferFromString( compat.as_bytes(options.SerializeToString())) if options else None run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None try: result = self._run(None, fetches, feed_dict, options_ptr, run_metadata_ptr) if run_metadata: proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) run_metadata.ParseFromString(compat.as_bytes(proto_data)) finally: if run_metadata_ptr: tf_session.TF_DeleteBuffer(run_metadata_ptr) if options: tf_session.TF_DeleteBuffer(options_ptr) return result def partial_run(self, handle, fetches, feed_dict=None): """Continues the execution with more feeds and fetches. This is EXPERIMENTAL and subject to change. To use partial execution, a user first calls `partial_run_setup()` and then a sequence of `partial_run()`. `partial_run_setup` specifies the list of feeds and fetches that will be used in the subsequent `partial_run` calls. The optional `feed_dict` argument allows the caller to override the value of tensors in the graph. See run() for more information. Below is a simple example: ```python a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) c = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) r2 = math_ops.multiply(r1, c) h = sess.partial_run_setup([r1, r2], [a, b, c]) res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2}) res = sess.partial_run(h, r2, feed_dict={c: res}) ``` Args: handle: A handle for a sequence of partial runs. fetches: A single graph element, a list of graph elements, or a dictionary whose values are graph elements or lists of graph elements (see documentation for `run`). feed_dict: A dictionary that maps graph elements to values (described above). Returns: Either a single value if `fetches` is a single graph element, or a list of values if `fetches` is a list, or a dictionary with the same keys as `fetches` if that is a dictionary (see documentation for `run`). Raises: tf.errors.OpError: Or one of its subclasses on error. """ # TODO(touts): Support feeding and fetching the same tensor. return self._run(handle, fetches, feed_dict, None, None) def partial_run_setup(self, fetches, feeds=None): """Sets up a graph with feeds and fetches for partial run. This is EXPERIMENTAL and subject to change. Note that contrary to `run`, `feeds` only specifies the graph elements. The tensors will be supplied by the subsequent `partial_run` calls. Args: fetches: A single graph element, or a list of graph elements. feeds: A single graph element, or a list of graph elements. Returns: A handle for partial run. Raises: RuntimeError: If this `Session` is in an invalid state (e.g. has been closed). TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type. tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens. """ def _feed_fn(feed): for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS: if isinstance(feed, tensor_type): return feed_fn(feed) raise TypeError('Feed argument %r has invalid type %r' % (feed, type(feed))) # Check session. if self._closed: raise RuntimeError('Attempted to use a closed Session.') if self.graph.version == 0: raise RuntimeError('The Session graph is empty. Add operations to the ' 'graph before calling run().') if feeds is None: feeds = [] # Create request. feed_list = [] # Validate and process feed_list. 
is_list_feed = isinstance(feeds, (list, tuple)) if not is_list_feed: feeds = [feeds] for feed in feeds: for subfeed in _feed_fn(feed): try: subfeed_t = self.graph.as_graph_element( subfeed, allow_tensor=True, allow_operation=False) # pylint: disable=protected-access feed_list.append(subfeed_t._as_tf_output()) # pylint: enable=protected-access except Exception as e: e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message) e.args = (e.message,) raise e # Validate and process fetches. # TODO(touts): Support feeding and fetching the same tensor. fetch_handler = _FetchHandler(self._graph, fetches, object_identity.ObjectIdentityDictionary()) # Set up a graph with feeds and fetches for partial run. def _setup_fn(session, feed_list, fetch_list, target_list): self._extend_graph() return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list, fetch_list, target_list) # pylint: disable=protected-access final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()] final_targets = [op._c_op for op in fetch_handler.targets()] # pylint: enable=protected-access return self._do_call(_setup_fn, self._session, feed_list, final_fetches, final_targets) def _run(self, handle, fetches, feed_dict, options, run_metadata): """Perform either run or partial_run, depending the presence of `handle`.""" def _feed_fn(feed, feed_val): for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS: if isinstance(feed, tensor_type): return feed_fn(feed, feed_val) raise TypeError('Feed argument %r has invalid type %r' % (feed, type(feed))) # Check session. if self._closed: raise RuntimeError('Attempted to use a closed Session.') if self.graph.version == 0: raise RuntimeError('The Session graph is empty. Add operations to the ' 'graph before calling run().') # Create request. feed_dict_tensor = object_identity.ObjectIdentityDictionary() feed_map = {} # Validate and process feed_dict. feed_handles = {} if feed_dict: feed_dict = nest.flatten_dict_items(feed_dict) for feed, feed_val in feed_dict.items(): for subfeed, subfeed_val in _feed_fn(feed, feed_val): try: subfeed_t = self.graph.as_graph_element( subfeed, allow_tensor=True, allow_operation=False) except Exception as e: raise TypeError('Cannot interpret feed_dict key as Tensor: ' + e.args[0]) if isinstance(subfeed_val, ops.Tensor): raise TypeError('The value of a feed cannot be a tf.Tensor object. ' 'Acceptable feed values include Python scalars, ' 'strings, lists, numpy ndarrays, or TensorHandles. ' 'For reference, the tensor object was ' + str(feed_val) + ' which was passed to the ' 'feed with key ' + str(feed) + '.') subfeed_dtype = subfeed_t.dtype.as_numpy_dtype if isinstance(subfeed_val, int) and _convert_to_numpy_obj( subfeed_dtype, subfeed_val) != subfeed_val: raise TypeError( 'Type of feed value ' + str(subfeed_val) + ' with type ' + str(type(subfeed_val)) + ' is not compatible with Tensor type ' + str(subfeed_dtype) + '. Try explicitly setting the type of the feed tensor' ' to a larger type (e.g. 
int64).') is_tensor_handle_feed = isinstance(subfeed_val, session_ops.TensorHandle) if is_tensor_handle_feed: np_val = subfeed_val.to_numpy_array() feed_handles[subfeed_t] = subfeed_val else: np_val = np.asarray(subfeed_val, dtype=subfeed_dtype) if (not is_tensor_handle_feed and not subfeed_t.get_shape().is_compatible_with(np_val.shape)): raise ValueError( 'Cannot feed value of shape %r for Tensor %r, ' 'which has shape %r' % (np_val.shape, subfeed_t.name, str(subfeed_t.get_shape()))) if not self.graph.is_feedable(subfeed_t): raise ValueError('Tensor %s may not be fed.' % subfeed_t) feed_dict_tensor[subfeed_t] = np_val feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val) # Create a fetch handler to take care of the structure of fetches. fetch_handler = _FetchHandler( self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles) # Run request and get response. # We need to keep the returned movers alive for the following _do_run(). # These movers are no longer needed when _do_run() completes, and # are deleted when `movers` goes out of scope when this _run() ends. # TODO(yuanbyu, keveman): Revisit whether we should just treat feeding # of a handle from a different device as an error. _ = self._update_with_movers(feed_dict_tensor, feed_map) final_fetches = fetch_handler.fetches() final_targets = fetch_handler.targets() # We only want to really perform the run if fetches or targets are provided, # or if the call is a partial run that specifies feeds. if final_fetches or final_targets or (handle and feed_dict_tensor): results = self._do_run(handle, final_targets, final_fetches, feed_dict_tensor, options, run_metadata) else: results = [] return fetch_handler.build_results(self, results) def make_callable(self, fetches, feed_list=None, accept_options=False): """Returns a Python callable that runs a particular step. The returned callable will take `len(feed_list)` arguments whose types must be compatible feed values for the respective elements of `feed_list`. For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th argument to the returned callable must be a numpy ndarray (or something convertible to an ndarray) with matching element type and shape. See `tf.Session.run` for details of the allowable feed key and value types. The returned callable will have the same return type as `tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`, the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`, it will return `None`. Args: fetches: A value or list of values to fetch. See `tf.Session.run` for details of the allowable fetch types. feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run` for details of the allowable feed key types. accept_options: (Optional.) If `True`, the returned `Callable` will be able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata` as optional keyword arguments `options` and `run_metadata`, respectively, with the same syntax and semantics as `tf.Session.run`, which is useful for certain use cases (profiling and debugging) but will result in measurable slowdown of the `Callable`'s performance. Default: `False`. Returns: A function that when called will execute the step defined by `feed_list` and `fetches` in this session. Raises: TypeError: If `fetches` or `feed_list` cannot be interpreted as arguments to `tf.Session.run`. 
""" if feed_list is not None: if not isinstance(feed_list, (list, tuple)): raise TypeError('`feed_list` must be a list or tuple.') # Delegate any non-empty feed lists to the existing `run()` logic. # TODO(mrry): Refactor the feed handling logic from # `Session._run()` so that we can convert the feeds to a list of # strings here. def _generic_run(*feed_args, **kwargs): feed_dict = { feed: feed_val for feed, feed_val in zip(feed_list, feed_args) } return self.run(fetches, feed_dict=feed_dict, **kwargs) return _generic_run # Ensure any changes to the graph are reflected in the runtime. # Note that we don't need to do this on subsequent calls to the # returned object, because the arguments to `fetches` must already be # in the graph. self._extend_graph() # Create a fetch handler to take care of the structure of fetches. fetch_handler = _FetchHandler(self._graph, fetches, object_identity.ObjectIdentityDictionary()) # pylint: disable=protected-access fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()] target_list = [op._c_op for op in fetch_handler.targets()] # pylint: enable=protected-access def _callable_template_with_options_and_metadata(fetch_list, target_list, fetch_handler, options=None, run_metadata=None): """Template callable that accepts RunOptions and RunMetadata.""" options_ptr = tf_session.TF_NewBufferFromString( compat.as_bytes(options.SerializeToString())) if options else None run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None try: results = self._call_tf_sessionrun(options_ptr, {}, fetch_list, target_list, run_metadata_ptr) if fetch_handler: results = fetch_handler.build_results(self, results) else: results = results[0] if results else None if run_metadata: proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) run_metadata.ParseFromString(compat.as_bytes(proto_data)) finally: if run_metadata_ptr: tf_session.TF_DeleteBuffer(run_metadata_ptr) if options: tf_session.TF_DeleteBuffer(options_ptr) return results if accept_options: return functools.partial(_callable_template_with_options_and_metadata, fetch_list, target_list, fetch_handler) elif isinstance(fetches, ops.Operation): # Special case for fetching a single operation, because the # function will have no return value. assert not fetch_list assert len(target_list) == 1 def _single_operation_run(): self._call_tf_sessionrun(None, {}, [], target_list, None) return _single_operation_run elif isinstance(fetches, ops.Tensor): # Special case for fetching a single tensor, because the # function can return the result of `TF_Run()` directly. assert len(fetch_list) == 1 assert not target_list def _single_tensor_run(): results = self._call_tf_sessionrun(None, {}, fetch_list, [], None) return results[0] return _single_tensor_run else: # In all other cases, we must use `fetch_handler` to build the # results for us. def _fetch_handler_run(): results = self._call_tf_sessionrun(None, {}, fetch_list, target_list, None) return fetch_handler.build_results(self, results) return _fetch_handler_run # Captures the name of a node in an error status. The regex below matches # both the old and the new formats: # Old format: [[Node: <node_name> = ...]] # New format: [[{{node <node_name>}} = ...]] _NODEDEF_NAME_RE = re.compile( r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*') def _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata): """Runs a step based on the given fetches and feeds. Args: handle: a handle for partial_run. None if this is just a call to run(). 
target_list: A list of operations to be run, but not fetched. fetch_list: A list of tensors to be fetched. feed_dict: A dictionary that maps tensors to numpy ndarrays. options: A (pointer to a) [`RunOptions`] protocol buffer, or None run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None Returns: A list of numpy ndarrays, corresponding to the elements of `fetch_list`. If the ith element of `fetch_list` contains the name of an operation, the first Tensor output of that operation will be returned for that element. Raises: tf.errors.OpError: Or one of its subclasses on error. """ # pylint: disable=protected-access feeds = dict((t._as_tf_output(), v) for t, v in feed_dict.items()) fetches = [t._as_tf_output() for t in fetch_list] targets = [op._c_op for op in target_list] # pylint: enable=protected-access def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata): # Ensure any changes to the graph are reflected in the runtime. self._extend_graph() return self._call_tf_sessionrun(options, feed_dict, fetch_list, target_list, run_metadata) def _prun_fn(handle, feed_dict, fetch_list): if target_list: raise RuntimeError('partial_run() requires empty target_list.') return self._call_tf_sessionprun(handle, feed_dict, fetch_list) if handle is None: return self._do_call(_run_fn, feeds, fetches, targets, options, run_metadata) else: return self._do_call(_prun_fn, handle, feeds, fetches) def _do_call(self, fn, *args): try: return fn(*args) except errors.OpError as e: message = compat.as_text(e.message) m = BaseSession._NODEDEF_NAME_RE.search(message) node_def = None op = None if m is not None: node_name = m.group(3) try: op = self._graph.get_operation_by_name(node_name) node_def = op.node_def except KeyError: pass message = error_interpolation.interpolate(message, self._graph) if 'only supports NHWC tensor format' in message: message += ('\nA possible workaround: Try disabling Grappler optimizer' '\nby modifying the config for creating the session eg.' '\nsession_config.graph_options.rewrite_options.' 'disable_meta_optimizer = True') raise type(e)(node_def, op, message) def _extend_graph(self): with self._graph._session_run_lock(): # pylint: disable=protected-access tf_session.ExtendSession(self._session) # The threshold to run garbage collection to delete dead tensors. _DEAD_HANDLES_THRESHOLD = 10 def _register_dead_handle(self, handle): # Register a dead handle in the session. Delete the dead tensors when # the number of dead tensors exceeds certain threshold. tensors_to_delete = None with self._delete_lock: self._dead_handles.append(handle) if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD: tensors_to_delete = self._dead_handles self._dead_handles = [] # Delete the dead tensors. if tensors_to_delete: feeds = {} fetches = [] for deleter_key, tensor_handle in enumerate(tensors_to_delete): holder, deleter = session_ops._get_handle_deleter( self.graph, deleter_key, tensor_handle) feeds[holder] = tensor_handle fetches.append(deleter) self.run(fetches, feed_dict=feeds) def _update_with_movers(self, feed_dict, feed_map): # If a tensor handle that is fed to a device incompatible placeholder, # we move the tensor to the right device, generate a new tensor handle, # and update `feed_dict` to use the new handle. handle_movers = [] for feed_name, val in feed_map.items(): mover = session_ops._get_handle_mover(self.graph, *val) if mover: handle_movers.append((feed_name, val[1], mover)) # Transfer a tensor to the right device if needed. 
if not handle_movers: return [] else: feeds = {} fetches = [] for _, handle, mover in handle_movers: feeds[mover[0]] = handle fetches.append(mover[1]) handles = self.run(fetches, feed_dict=feeds) for handle_mover, handle in zip(handle_movers, handles): np_val = np.array(handle.handle, dtype=np.object) feed_name = handle_mover[0] feed_tensor = feed_map[feed_name][0] feed_dict[feed_tensor] = np_val return handles def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list, run_metadata): return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict, fetch_list, target_list, run_metadata) def _call_tf_sessionprun(self, handle, feed_dict, fetch_list): return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict, fetch_list) # pylint: disable=protected-access class _Callable(object): """Experimental wrapper for the C++ `Session::MakeCallable()` API.""" def __init__(self, session, callable_options): self._session = session self._handle = None options_ptr = tf_session.TF_NewBufferFromString( compat.as_bytes(callable_options.SerializeToString())) try: self._handle = tf_session.TF_SessionMakeCallable( session._session, options_ptr) finally: tf_session.TF_DeleteBuffer(options_ptr) def __call__(self, *args, **kwargs): # TODO(b/74355905): Support argument and return value nested structures, # and tensor-like objects such as SparseTensors. run_metadata = kwargs.get('run_metadata', None) try: run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None ret = tf_session.TF_SessionRunCallable(self._session._session, self._handle, args, run_metadata_ptr) if run_metadata: proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) run_metadata.ParseFromString(compat.as_bytes(proto_data)) finally: if run_metadata_ptr: tf_session.TF_DeleteBuffer(run_metadata_ptr) return ret def __del__(self): # NOTE(mrry): It is possible that `self._session.__del__()` could be # called before this destructor, in which case `self._session._session` # will be `None`. if (self._handle is not None and self._session._session is not None and not self._session._closed): tf_session.TF_SessionReleaseCallable(self._session._session, self._handle) # pylint: enable=protected-access # TODO(b/74355905): Reimplement `Session.make_callable()` using this method # where possible. def _make_callable_from_options(self, callable_options): """Returns a handle to a "callable" with the given options. Args: callable_options: A `CallableOptions` protocol buffer message describing the computation that will be performed by the callable. Returns: A handle to the new callable. """ self._extend_graph() return BaseSession._Callable(self, callable_options) @tf_export(v1=['Session']) class Session(BaseSession): """A class for running TensorFlow operations. A `Session` object encapsulates the environment in which `Operation` objects are executed, and `Tensor` objects are evaluated. For example: ```python # Build a graph. a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # Launch the graph in a session. sess = tf.compat.v1.Session() # Evaluate the tensor `c`. print(sess.run(c)) ``` A session may own resources, such as `tf.Variable`, `tf.queue.QueueBase`, and `tf.compat.v1.ReaderBase`. It is important to release these resources when they are no longer required. To do this, either invoke the `tf.Session.close` method on the session, or use the session as a context manager. The following two examples are equivalent: ```python # Using the `close()` method. sess = tf.compat.v1.Session() sess.run(...) 
sess.close() # Using the context manager. with tf.compat.v1.Session() as sess: sess.run(...) ``` The [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) protocol buffer exposes various configuration options for a session. For example, to create a session that uses soft constraints for device placement, and log the resulting placement decisions, create a session as follows: ```python # Launch the graph in a session that allows soft device placement and # logs the placement decisions. sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto( allow_soft_placement=True, log_device_placement=True)) ``` """ def __init__(self, target='', graph=None, config=None): """Creates a new TensorFlow session. If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()`) in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. See [Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for more examples. graph: (Optional.) The `Graph` to be launched (described above). config: (Optional.) A [`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) protocol buffer with configuration options for the session. """ super(Session, self).__init__(target, graph, config=config) # NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle. self._default_graph_context_manager = None self._default_session_context_manager = None def __enter__(self): if self._default_graph_context_manager is None: self._default_graph_context_manager = self.graph.as_default() else: raise RuntimeError('Session context managers are not re-entrant. ' 'Use `Session.as_default()` if you want to enter ' 'a session multiple times.') if self._default_session_context_manager is None: self._default_session_context_manager = self.as_default() self._default_graph_context_manager.__enter__() return self._default_session_context_manager.__enter__() def __exit__(self, exec_type, exec_value, exec_tb): if exec_type is errors.OpError: logging.error('Session closing due to OpError: %s', (exec_value,)) try: self._default_session_context_manager.__exit__(exec_type, exec_value, exec_tb) except RuntimeError as error: if error == exec_value: # NOTE(skyewm): for some reason, in Python3, # _default_session_context_manager.__exit__ will re-raise the "not # re-entrant" exception raised in __enter__ above (note that if we're # here, we're in the outer session context manager, since __exit__ is # not called when __enter__ raises an exception). We still want to # continue cleaning up this context manager before the exception is # further propagated, so we ignore it here (note that it'll continue # being propagated after this method completes). pass else: raise self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb) self._default_session_context_manager = None self._default_graph_context_manager = None # If we are closing due to an exception, set a time limit on our Close() to # avoid blocking forever. # TODO(b/120204635) remove this when deadlock is fixed. 
if exec_type: close_thread = threading.Thread( name='SessionCloseThread', target=self.close) close_thread.daemon = True close_thread.start() close_thread.join(30.0) if close_thread.is_alive(): logging.error( 'Session failed to close after 30 seconds. Continuing after this ' 'point may leave your program in an undefined state.') else: self.close() @staticmethod def reset(target, containers=None, config=None): """Resets resource containers on `target`, and closes all connected sessions. A resource container is distributed across all workers in the same cluster as `target`. When a resource container on `target` is reset, resources associated with that container will be cleared. In particular, all Variables in the container will become undefined: they lose their values and shapes. NOTE: (i) reset() is currently only implemented for distributed sessions. (ii) Any sessions on the master named by `target` will be closed. If no resource containers are provided, all containers are reset. Args: target: The execution engine to connect to. containers: A list of resource container name strings, or `None` if all of the containers are to be reset. config: (Optional.) Protocol buffer with configuration options. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while resetting containers. """ if target is not None: target = compat.as_bytes(target) if containers is not None: containers = [compat.as_bytes(c) for c in containers] else: containers = [] tf_session.TF_Reset(target, containers, config) @tf_export(v1=['InteractiveSession']) class InteractiveSession(BaseSession): """A TensorFlow `Session` for use in interactive contexts, such as a shell. The only difference from a regular `Session` is that an `InteractiveSession` installs itself as the default session on construction. The methods `tf.Tensor.eval` and `tf.Operation.run` will use that session to run ops. This is convenient in interactive shells and [IPython notebooks](http://ipython.org), as it avoids having to pass an explicit `Session` object to run ops. For example: ```python sess = tf.compat.v1.InteractiveSession() a = tf.constant(5.0) b = tf.constant(6.0) c = a * b # We can just use 'c.eval()' without passing 'sess' print(c.eval()) sess.close() ``` Note that a regular session installs itself as the default session when it is created in a `with` statement. The common usage in non-interactive programs is to follow that pattern: ```python a = tf.constant(5.0) b = tf.constant(6.0) c = a * b with tf.compat.v1.Session(): # We can also use 'c.eval()' here. print(c.eval()) ``` """ _count_lock = threading.Lock() _active_session_count = 0 # GUARDED_BY(_count_lock) def __init__(self, target='', graph=None, config=None): """Creates a new interactive TensorFlow session. If no `graph` argument is specified when constructing the session, the default graph will be launched in the session. If you are using more than one graph (created with `tf.Graph()`) in the same process, you will have to use different sessions for each graph, but each graph can be used in multiple sessions. In this case, it is often clearer to pass the graph to be launched explicitly to the session constructor. Args: target: (Optional.) The execution engine to connect to. Defaults to using an in-process engine. graph: (Optional.) The `Graph` to be launched (described above). config: (Optional.) `ConfigProto` proto used to configure the session. 
""" if not config: # If config is not provided, choose some reasonable defaults for # interactive use: # # - Grow GPU memory as needed at the cost of fragmentation. gpu_options = config_pb2.GPUOptions(allow_growth=True) config = config_pb2.ConfigProto(gpu_options=gpu_options) # Interactive sessions always place pruned graphs. config.graph_options.place_pruned_graph = True super(InteractiveSession, self).__init__(target, graph, config) with InteractiveSession._count_lock: if InteractiveSession._active_session_count > 0: warnings.warn('An interactive session is already active. This can ' 'cause out-of-memory errors in some cases. You must ' 'explicitly call `InteractiveSession.close()` to release ' 'resources held by the other session(s).') InteractiveSession._active_session_count += 1 # NOTE(mrry): We do not use `Session._closed` here because it has unhelpful # semantics (in particular, it is not set to true if `Session.close()` is # called on a session that has not been "opened" by running a step) and we # cannot change those semantics without breaking existing code. self._explicitly_closed = False self._default_session = self.as_default() self._default_session.enforce_nesting = False self._default_session.__enter__() self._explicit_graph = graph if self._explicit_graph is not None: self._default_graph = graph.as_default() self._default_graph.enforce_nesting = False self._default_graph.__enter__() def close(self): """Closes an `InteractiveSession`.""" super(InteractiveSession, self).close() with InteractiveSession._count_lock: if not self._explicitly_closed: InteractiveSession._active_session_count -= 1 self._explicitly_closed = True else: return if self._explicit_graph is not None: self._default_graph.__exit__(None, None, None) self._default_graph = None self._default_session.__exit__(None, None, None) self._default_session = None
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session.py
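The partial-run plumbing above (`TF_SessionPRunSetup_wrapper` and `_prun_fn`) backs the public `partial_run_setup`/`partial_run` API. A minimal sketch of how a caller drives it, assuming TF 1.x graph mode; the placeholder names and values are illustrative, not taken from the file:

```python
import tensorflow.compat.v1 as tf

a = tf.placeholder(tf.float32, shape=[])
b = tf.placeholder(tf.float32, shape=[])
c = tf.placeholder(tf.float32, shape=[])
r1 = a + b
r2 = r1 * c

with tf.Session() as sess:
  # Every feed and fetch the partial run will ever use must be declared
  # up front; this call reaches the PRunSetup path shown above.
  h = sess.partial_run_setup([r1, r2], [a, b, c])
  # Feeds may then be supplied incrementally across calls on one handle.
  res = sess.partial_run(h, r1, feed_dict={a: 1.0, b: 2.0})
  res = sess.partial_run(h, r2, feed_dict={c: res})  # (1.0 + 2.0) * 3.0
```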
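`make_callable` is documented in the file above but not demonstrated; a short usage sketch under the same TF 1.x assumptions (tensor names illustrative):

```python
import tensorflow.compat.v1 as tf

x = tf.placeholder(tf.float32, shape=[None])
y = x * 2.0

with tf.Session() as sess:
  # The callable takes one positional argument per feed_list entry and
  # skips the per-call fetch/feed validation that Session.run() performs.
  doubler = sess.make_callable(y, feed_list=[x])
  print(doubler([1.0, 2.0, 3.0]))  # => [2. 4. 6.]
```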
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/__init__.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.Timeline.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.client import timeline from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class TimelineTest(test.TestCase): def _validateTrace(self, chrome_trace_format): # Check that the supplied string is valid JSON. trace = json.loads(chrome_trace_format) # It should have a top-level key containing events. self.assertTrue('traceEvents' in trace) # Every event in the list should have a 'ph' field. for event in trace['traceEvents']: self.assertTrue('ph' in event) def testSimpleTimeline(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: sess.run(constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) tl = timeline.Timeline(run_metadata.step_stats) ctf = tl.generate_chrome_trace_format() self._validateTrace(ctf) @test_util.deprecated_graph_mode_only def testTimelineCpu(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with self.session(use_gpu=False) as sess: const1 = constant_op.constant(1.0, name='const1') const2 = constant_op.constant(2.0, name='const2') result = math_ops.add(const1, const2) + const1 * const2 sess.run(result, options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) step_stats = run_metadata.step_stats devices = [d.device for d in step_stats.dev_stats] self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format() self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_dataflow=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_memory=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format( show_memory=False, show_dataflow=False) self._validateTrace(ctf) @test_util.deprecated_graph_mode_only def testTimelineGpu(self): if not test.is_gpu_available(cuda_only=True): return run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with self.session(force_gpu=True) as sess: const1 = 
constant_op.constant(1.0, name='const1') const2 = constant_op.constant(2.0, name='const2') result = math_ops.add(const1, const2) + const1 * const2 sess.run(result, options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) step_stats = run_metadata.step_stats devices = [d.device for d in step_stats.dev_stats] self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices) if not test.is_built_with_rocm(): # skip this check for the ROCm platform # stream level tracing is not yet supported on the ROCm platform self.assertTrue('/device:GPU:0/stream:all' in devices) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format() self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_dataflow=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_memory=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format( show_memory=False, show_dataflow=False) self._validateTrace(ctf) def testTimelineWithRPCs(self): """Tests that Timeline can handle RPC tracing.""" metadata = config_pb2.RunMetadata() step_stats = metadata.step_stats dev_stats = step_stats.dev_stats.add() dev_stats.device = '/job:worker/replica:0/task:0/cpu:0' node_stats = dev_stats.node_stats.add() node_stats.node_name = 'RecvTensor' node_stats.all_start_micros = 12345 node_stats.op_end_rel_micros = 42 node_stats.timeline_label = ('[1024B] edge_160_conv2/biases/read from ' '/job:ps/replica:0/task:3/cpu:0 to ' '/job:worker/replica:0/task:0/cpu:0') tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format() self._validateTrace(ctf) def testAnalysisAndAllocations(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() config = config_pb2.ConfigProto(device_count={'CPU': 3}) with session.Session(config=config) as sess: with ops.device('/cpu:0'): num1 = variables.Variable(1.0, name='num1') with ops.device('/cpu:1'): num2 = variables.Variable(2.0, name='num2') with ops.device('/cpu:2'): result = num1 + num2 + num1 * num2 self.evaluate(variables.global_variables_initializer()) sess.run(result, options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) tl = timeline.Timeline(run_metadata.step_stats) step_analysis = tl.analyze_step_stats() ctf = step_analysis.chrome_trace.format_to_string() self._validateTrace(ctf) maximums = step_analysis.allocator_maximums cpuname = 'mklcpu' if test_util.IsMklEnabled() else 'cpu' self.assertTrue(cpuname in maximums) cpu_max = maximums[ 'cuda_host_bfc'] if 'cuda_host_bfc' in maximums else maximums[cpuname] # At least num1 + num2, both float32s (4 bytes each) self.assertGreaterEqual(cpu_max.num_bytes, 8) self.assertGreater(cpu_max.timestamp, 0) def testManyCPUs(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() config = config_pb2.ConfigProto(device_count={'CPU': 3}) with session.Session(config=config) as sess: with ops.device('/cpu:0'): num1 = variables.Variable(1.0, name='num1') with ops.device('/cpu:1'): num2 = variables.Variable(2.0, name='num2') with ops.device('/cpu:2'): result = num1 + num2 + num1 * num2 self.evaluate(variables.global_variables_initializer()) sess.run(result, options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) step_stats = 
run_metadata.step_stats devices = [d.device for d in step_stats.dev_stats] self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices) self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:1' in devices) self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:2' in devices) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format() self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_dataflow=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format(show_memory=False) self._validateTrace(ctf) tl = timeline.Timeline(step_stats) ctf = tl.generate_chrome_trace_format( show_memory=False, show_dataflow=False) self._validateTrace(ctf) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/timeline_test.py
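The tests above all follow the same profiling recipe, which works outside tests as well: run one step with `FULL_TRACE`, then serialize `run_metadata.step_stats` into Chrome's tracing format. A minimal sketch (the output path is illustrative):

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.client import timeline

x = tf.constant(1.0) + tf.constant(2.0)

with tf.Session() as sess:
  run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()
  sess.run(x, options=run_options, run_metadata=run_metadata)

# The resulting JSON can be loaded in Chrome via about://tracing.
tl = timeline.Timeline(run_metadata.step_stats)
with open('/tmp/timeline.json', 'w') as f:
  f.write(tl.generate_chrome_trace_format())
```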
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session's list_devices API.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import cluster_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow as tf_session from tensorflow.python.client import session from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.platform import googletest from tensorflow.python.training import server_lib class SessionListDevicesTest(test_util.TensorFlowTestCase): def testListDevices(self): with session.Session() as sess: devices = sess.list_devices() self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in set( [d.name for d in devices]), devices) # All valid device incarnations must be non-zero. self.assertTrue(all(d.incarnation != 0 for d in devices)) def testInvalidDeviceNumber(self): opts = tf_session.TF_NewSessionOptions() c_session = tf_session.TF_NewSession(ops.get_default_graph()._c_graph, opts) raw_device_list = tf_session.TF_SessionListDevices(c_session) size = tf_session.TF_DeviceListCount(raw_device_list) with self.assertRaises(errors.InvalidArgumentError): tf_session.TF_DeviceListMemoryBytes(raw_device_list, size) tf_session.TF_DeleteDeviceList(raw_device_list) tf_session.TF_CloseSession(c_session) def testListDevicesGrpcSession(self): server = server_lib.Server.create_local_server() with session.Session(server.target) as sess: devices = sess.list_devices() self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in set( [d.name for d in devices]), devices) # All valid device incarnations must be non-zero. self.assertTrue(all(d.incarnation != 0 for d in devices)) def testListDevicesClusterSpecPropagation(self): server1 = server_lib.Server.create_local_server() server2 = server_lib.Server.create_local_server() cluster_def = cluster_pb2.ClusterDef() job = cluster_def.job.add() job.name = 'worker' job.tasks[0] = server1.target[len('grpc://'):] job.tasks[1] = server2.target[len('grpc://'):] config = config_pb2.ConfigProto(cluster_def=cluster_def) with session.Session(server1.target, config=config) as sess: devices = sess.list_devices() device_names = set(d.name for d in devices) self.assertTrue( '/job:worker/replica:0/task:0/device:CPU:0' in device_names) self.assertTrue( '/job:worker/replica:0/task:1/device:CPU:0' in device_names) # All valid device incarnations must be non-zero. self.assertTrue(all(d.incarnation != 0 for d in devices)) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session_list_devices_test.py
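Beyond the `name` and `incarnation` fields these tests assert on, each entry returned by `list_devices()` is a `_DeviceAttributes` record that also exposes `device_type` and `memory_limit_bytes` (attribute names per the session module, not asserted by the tests); a sketch:

```python
import tensorflow.compat.v1 as tf

with tf.Session() as sess:
  for d in sess.list_devices():
    # e.g. /job:localhost/replica:0/task:0/device:CPU:0  CPU  268435456
    print(d.name, d.device_type, d.memory_limit_bytes, d.incarnation)
```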
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests and benchmarks for interacting with the `tf.compat.v1.Session`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.client import session from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import server_lib class SessionBenchmark(test.Benchmark): """Tests and benchmarks for interacting with the `tf.compat.v1.Session`.""" def _benchmarkFeed(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of feeding a tensor. Reports the median cost of feeding a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fed. iters: The number of iterations to perform. """ feed_val = np.random.rand(size).astype(np.float32) times = [] with ops.Graph().as_default(): p = array_ops.placeholder(dtypes.float32, shape=[size]) # Fetch the operation rather than the tensor, to avoid measuring the time # to fetch back the value. no_op = array_ops.identity(p).op with session.Session(target) as sess: sess.run(no_op, feed_dict={p: feed_val}) # Warm-up run. for _ in xrange(iters): start_time = time.time() sess.run(no_op, feed_dict={p: feed_val}) end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkFetch(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the tensor to be fetched as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([size])) with session.Session(target) as sess: sess.run(v.initializer) sess.run(v) # Warm-up run. 
for _ in xrange(iters): start_time = time.time() sess.run(v) end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkFetchPrebuilt(self, name, target, size, iters): """Runs a microbenchmark to measure the cost of fetching a tensor. Reports the median cost of fetching a tensor of `size` * `sizeof(float)` bytes. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. size: The number of floating-point numbers to be fetched. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the tensor to be fetched as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([size])) with session.Session(target) as sess: sess.run(v.initializer) runner = sess.make_callable(v) runner() # Warm-up run. for _ in xrange(iters): start_time = time.time() runner() end_time = time.time() times.append(end_time - start_time) print("%s %d %f" % (name, size, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkRunOp(self, name, target, iters): """Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the op to be run as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([])) with session.Session(target) as sess: sess.run(v.initializer) sess.run(v.op) # Warm-up run. for _ in xrange(iters): start_time = time.time() sess.run(v.op) end_time = time.time() times.append(end_time - start_time) print("%s %f" % (name, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def _benchmarkRunOpPrebuilt(self, name, target, iters): """Runs a microbenchmark to measure the cost of running an op. Reports the median cost of running a trivial (Variable) op. Args: name: A human-readable name for logging the output. target: The session target to use for the benchmark. iters: The number of iterations to perform. """ times = [] with ops.Graph().as_default(): # Define the op to be run as a variable, to avoid # constant-folding. v = variables.Variable(random_ops.random_normal([])) with session.Session(target) as sess: sess.run(v.initializer) runner = sess.make_callable(v.op) runner() # Warm-up run. 
for _ in xrange(iters): start_time = time.time() runner() end_time = time.time() times.append(end_time - start_time) print("%s %f" % (name, np.median(times))) self.report_benchmark(iters=1, wall_time=np.median(times), name=name) def benchmarkGrpcSession(self): server = server_lib.Server.create_local_server() self._benchmarkFeed("benchmark_session_feed_grpc_4B", server.target, 1, 30000) session.Session.reset(server.target) self._benchmarkFeed("benchmark_session_feed_grpc_4MB", server.target, 1 << 20, 25000) session.Session.reset(server.target) self._benchmarkFetch("benchmark_session_fetch_grpc_4B", server.target, 1, 40000) session.Session.reset(server.target) self._benchmarkFetch("benchmark_session_fetch_grpc_4MB", server.target, 1 << 20, 20000) session.Session.reset(server.target) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4B", server.target, 1, 50000) session.Session.reset(server.target) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_grpc_4MB", server.target, 1 << 20, 50000) session.Session.reset(server.target) self._benchmarkRunOp("benchmark_session_runop_grpc", server.target, 50000) session.Session.reset(server.target) self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_grpc", server.target, 100000) session.Session.reset(server.target) def benchmarkDirectSession(self): self._benchmarkFeed("benchmark_session_feed_direct_4B", "", 1, 80000) self._benchmarkFeed("benchmark_session_feed_direct_4MB", "", 1 << 20, 20000) self._benchmarkFetch("benchmark_session_fetch_direct_4B", "", 1, 100000) self._benchmarkFetch("benchmark_session_fetch_direct_4MB", "", 1 << 20, 20000) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4B", "", 1, 200000) self._benchmarkFetchPrebuilt("benchmark_session_fetchprebuilt_direct_4MB", "", 1 << 20, 200000) self._benchmarkRunOp("benchmark_session_runop_direct", "", 200000) self._benchmarkRunOpPrebuilt("benchmark_session_runopprebuilt_direct", "", 200000) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session_benchmark.py
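Every benchmark above shares one timing skeleton: a warm-up call excluded from timing, per-iteration wall clocks, and the median as the reported statistic (robust to scheduler noise). Stripped to its core, with an arbitrary size and iteration count:

```python
import time
import numpy as np
import tensorflow.compat.v1 as tf

v = tf.Variable(tf.random.normal([1000]))
times = []
with tf.Session() as sess:
  sess.run(v.initializer)
  runner = sess.make_callable(v)
  runner()  # Warm-up run; excluded from the timings.
  for _ in range(100):
    start = time.time()
    runner()
    times.append(time.time() - start)
print('median fetch time: %f s' % np.median(times))
```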
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Notebook front-end to TensorFlow. When you run this binary, you'll see something like below, which indicates the serving URL of the notebook: The IPython Notebook is running at: http://127.0.0.1:8888/ Press "Shift+Enter" to execute a cell Press "Enter" on a cell to go into edit mode. Press "Escape" to go back into command mode and use arrow keys to navigate. Press "a" in command mode to insert cell above or "b" to insert cell below. Your root notebooks directory is FLAGS.notebook_dir """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import socket import sys from tensorflow.python.platform import app # pylint: disable=g-import-not-at-top # Official recommended way of turning on fast protocol buffers as of 10/21/14 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp" os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2" FLAGS = None ORIG_ARGV = sys.argv # Main notebook process calls itself with argv[1]="kernel" to start kernel # subprocesses. IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel" def main(unused_argv): sys.argv = ORIG_ARGV if not IS_KERNEL: # Drop all flags. sys.argv = [sys.argv[0]] # NOTE(sadovsky): For some reason, putting this import at the top level # breaks inline plotting. It's probably a bug in the stone-age version of # matplotlib. from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top notebookapp = NotebookApp.instance() notebookapp.open_browser = True # password functionality adopted from quality/ranklab/main/tools/notebook.py # add options to run with "password" if FLAGS.password: from IPython.lib import passwd # pylint: disable=g-import-not-at-top notebookapp.ip = "0.0.0.0" notebookapp.password = passwd(FLAGS.password) else: print("\nNo password specified; Notebook server will only be available" " on the local machine.\n") notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir]) if notebookapp.ip == "0.0.0.0": proto = "https" if notebookapp.certfile else "http" url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port, notebookapp.base_project_url) print("\nNotebook server will be publicly available at: %s\n" % url) notebookapp.start() return # Drop the --flagfile flag so that notebook doesn't complain about an # "unrecognized alias" when parsing sys.argv. sys.argv = ([sys.argv[0]] + [z for z in sys.argv[1:] if not z.startswith("--flagfile")]) from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top kernelapp = IPKernelApp.instance() kernelapp.initialize() # Enable inline plotting. Equivalent to running "%matplotlib inline". 
ipshell = kernelapp.shell ipshell.enable_matplotlib("inline") kernelapp.start() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--password", type=str, default=None, help="""\ Password to require. If set, the server will allow public access. Only used if notebook config file does not exist.\ """) parser.add_argument( "--notebook_dir", type=str, default="experimental/brain/notebooks", help="root location where to store notebooks") # When the user starts the main notebook process, we don't touch sys.argv. # When the main process launches kernel subprocesses, it writes all flags # to a tmpfile and sets --flagfile to that tmpfile, so for kernel # subprocesses here we drop all flags *except* --flagfile, then call # app.run(), and then (in main) restore all flags before starting the # kernel app. if IS_KERNEL: # Drop everything except --flagfile. sys.argv = ( [sys.argv[0]] + [x for x in sys.argv[1:] if x.startswith("--flagfile")]) FLAGS, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/notebook.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for multiple virtual GPU support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import numpy as np from google.protobuf import text_format from tensorflow.core.protobuf import config_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging class VirtualGpuTestUtil(object): def __init__(self, dim=1000, num_ops=100, virtual_devices_per_gpu=None, device_probabilities=None): self._dim = dim self._num_ops = num_ops if virtual_devices_per_gpu is None: self._virtual_devices_per_gpu = [3] else: self._virtual_devices_per_gpu = virtual_devices_per_gpu self._visible_device_list = [ i for i in range(len(self._virtual_devices_per_gpu)) ] gpu_devices = [ ('/gpu:' + str(i)) for i in range(sum(self._virtual_devices_per_gpu)) ] self.devices = ['/cpu:0'] + gpu_devices self._num_devices = len(self.devices) # Each virtual device gets 2GB memory. self._mem_limits_mb = [ ([1 << 11] * i) for i in self._virtual_devices_per_gpu ] self.config = self._GetSessionConfig() if device_probabilities is not None: self._device_probabilities = list(device_probabilities) # Deep copy for i in range(1, self._num_devices): self._device_probabilities[i] += self._device_probabilities[i - 1] else: # Each device gets same probability to be assigned an operation. step = 1.0 / self._num_devices self._device_probabilities = [ (x + 1) * step for x in range(self._num_devices) ] # To prevent rounding error causing problems. 
self._device_probabilities[self._num_devices - 1] = 1.1 logging.info('dim: %d', self._dim) logging.info('num_ops: %d', self._num_ops) logging.info('visible_device_list: %s', str(self._visible_device_list)) logging.info('virtual_devices_per_gpu: %s', str(self._virtual_devices_per_gpu)) logging.info('mem_limits: %s', str(self._mem_limits_mb)) logging.info('devices: %s', str(self.devices)) logging.info('config: %s', text_format.MessageToString(self.config)) logging.info('device_probabilities: %s', str(self._device_probabilities)) # Creates virtual GPU devices def _GetSessionConfig(self): virtual_device_gpu_options = config_pb2.GPUOptions( visible_device_list=','.join(str(d) for d in self._visible_device_list), experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[ config_pb2.GPUOptions.Experimental.VirtualDevices( memory_limit_mb=i) for i in self._mem_limits_mb ])) return config_pb2.ConfigProto(gpu_options=virtual_device_gpu_options) # Generates a list of 3-tuples, where each tuple contains the source and # destination device indices for a binary operation like 'add', e.g.: # (src_device_1, src_device_2, dst_device) def _GenerateOperationPlacement(self): result = [] for unused_i in range(self._num_ops): op_device = () for unused_j in range(3): random_num = random.random() for device_index in range(self._num_devices): if self._device_probabilities[device_index] > random_num: op_device += (device_index,) break result.append(op_device) return result # Logs part of the matrix for debugging purposes. def _LogMatrix(self, mat, dim): logging.info('---- printing the first 10*10 submatrix ----') for i in range(min(10, dim)): row = '' for j in range(min(10, dim)): row += ' ' + str(mat[i][j]) logging.info(row) # Runs a list of 'add' operations where each operation satisfies the device # placement constraints in `op_placement`, and returns the result. def _TestRandomGraphWithDevices(self, sess, seed, op_placement, devices, debug_mode=False): data = [] shape = (self._dim, self._dim) feed_dict = {} # Initialize the matrices for i in range(len(devices)): with ops.device(devices[i]): var = array_ops.placeholder(dtypes.float32, shape=shape) np.random.seed(seed + i) feed_dict[var] = np.random.uniform( low=0, high=0.1, size=shape).astype(np.float32) data.append(var) # Run the 'add' operations on those matrices for op in op_placement: with ops.device(devices[op[2]]): data[op[2]] = math_ops.add(data[op[0]], data[op[1]]) with ops.device('/cpu:0'): s = data[0] for i in range(1, len(data)): s = math_ops.add(s, data[i]) if debug_mode: logging.info(ops.get_default_graph().as_graph_def()) result = sess.run(s, feed_dict=feed_dict) self._LogMatrix(result, self._dim) return result # Generates a random graph with `self._num_ops` 'add' operations, with each # operation placed on a different virtual device, and tests that the result is # identical to the result obtained by running the same graph on cpu only. def TestRandomGraph(self, sess, op_placement=None, random_seed=None): debug_mode = False if op_placement is None: op_placement = self._GenerateOperationPlacement() else: debug_mode = True if random_seed is None: random_seed = random.randint(0, 1 << 31) else: debug_mode = True logging.info('Virtual gpu functional test for random graph...') logging.info('operation placement: %s', str(op_placement)) logging.info('random seed: %d', random_seed) # Run with multiple virtual gpus. result_vgd = self._TestRandomGraphWithDevices( sess, random_seed, op_placement, self.devices, debug_mode=debug_mode) # Run with single cpu. 
result_cpu = self._TestRandomGraphWithDevices( sess, random_seed, op_placement, ['/cpu:0'] * self._num_devices, debug_mode=debug_mode) # Test the result for i in range(self._dim): for j in range(self._dim): if result_vgd[i][j] != result_cpu[i][j]: logging.error( 'Result mismatch at row %d column %d: expected %f, actual %f', i, j, result_cpu[i][j], result_vgd[i][j]) logging.error('Devices: %s', self.devices) logging.error('Memory limits (in MB): %s', self._mem_limits_mb) return False return True class VirtualGpuTest(test_util.TensorFlowTestCase): def __init__(self, method_name): super(VirtualGpuTest, self).__init__(method_name) self._util = VirtualGpuTestUtil() @test_util.deprecated_graph_mode_only def testStatsContainAllDeviceNames(self): with self.session(config=self._util.config) as sess: # TODO(laigd): b/70811538. The is_gpu_available() call will invoke # DeviceFactory::AddDevices() with a default SessionOption, which prevents # adding virtual devices in the future, thus must be called within a # context of a session within which virtual devices are created. Same in # the following test case. if not test.is_gpu_available(cuda_only=True): self.skipTest('No GPU available') run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() mat_shape = [10, 10] data = [] for d in self._util.devices: with ops.device(d): var = variables.Variable(random_ops.random_uniform(mat_shape)) self.evaluate(var.initializer) data.append(var) s = data[0] for i in range(1, len(data)): s = math_ops.add(s, data[i]) sess.run(s, options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) step_stats = run_metadata.step_stats devices = [d.device for d in step_stats.dev_stats] self.assertTrue('/job:localhost/replica:0/task:0/device:CPU:0' in devices) self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:0' in devices) self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:1' in devices) self.assertTrue('/job:localhost/replica:0/task:0/device:GPU:2' in devices) @test_util.deprecated_graph_mode_only def testLargeRandomGraph(self): with self.session(config=self._util.config) as sess: if not test.is_gpu_available(cuda_only=True): self.skipTest('No GPU available') for _ in range(5): if not self._util.TestRandomGraph(sess): return if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/virtual_gpu_test.py
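`_GetSessionConfig` above is the whole recipe for splitting physical GPUs into virtual devices in TF 1.x. Standalone, carving two 2 GB virtual devices out of physical GPU 0 (the sizes are illustrative):

```python
from tensorflow.core.protobuf import config_pb2

gpu_options = config_pb2.GPUOptions(
    visible_device_list='0',  # Expose only physical GPU 0.
    experimental=config_pb2.GPUOptions.Experimental(virtual_devices=[
        # One VirtualDevices message per visible physical GPU; each
        # memory_limit_mb entry becomes one virtual device of that size.
        config_pb2.GPUOptions.Experimental.VirtualDevices(
            memory_limit_mb=[2048, 2048])
    ]))
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# A session created with this config sees /gpu:0 and /gpu:1 on one card.
```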
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.session.Session.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import random import os import sys import threading import time import warnings import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.lib.core import error_codes_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.framework import common_shapes from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as framework_device_lib from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import function from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.framework import test_util from tensorflow.python.framework import versions from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gen_control_flow_ops # Import gradients to resolve circular imports from tensorflow.python.ops import gradients # pylint: disable=unused-import from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops # Import resource_variable_ops for the variables-to-tensor implicit conversion. from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.training import server_lib from tensorflow.python.util import compat try: import attr # pylint:disable=g-import-not-at-top except ImportError: attr = None # NOTE(mrry): Dummy shape registration for ops used in the tests, since they # don't have C++ op registrations on which to attach C++ shape fns. 
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape) class SessionTest(test_util.TensorFlowTestCase): def setUp(self): super(SessionTest, self).setUp() warnings.simplefilter('always') def testUseExistingGraph(self): with ops.Graph().as_default() as g, ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(graph=g): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testUseDefaultGraph(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant(6.0, shape=[1, 1]) b = constant_op.constant(7.0, shape=[1, 1]) c = math_ops.matmul(a, b, name='matmul') with session.Session(): result = c.eval() self.assertAllEqual(result, [[42.0]]) def testCreate(self): with session.Session(): inp = constant_op.constant(10.0, shape=[2, 3], name='W1') copy = array_ops.identity(inp) # Test with feed. # TODO(mrry): Investigate why order='F' didn't work. arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C') copy_val = copy.eval({'W1:0': arr}) self.assertAllEqual(arr, copy_val) # Test without feed. copy_val = copy.eval() self.assertAllEqual( np.asarray( [[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32), copy_val) def testManyCPUs(self): with session.Session( config=config_pb2.ConfigProto(device_count={ 'CPU': 2, 'GPU': 0 })) as sess: inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) num_cpu_devices = 0 num_gpu_devices = 0 for device in sess.list_devices(): device_type = framework_device_lib.DeviceSpec.from_string( device.name).device_type if device_type == 'CPU': num_cpu_devices += 1 elif device_type == 'GPU': num_gpu_devices += 1 self.assertEqual(2, num_cpu_devices) self.assertEqual(0, num_gpu_devices) def testPerSessionThreads(self): with session.Session( config=config_pb2.ConfigProto(use_per_session_threads=True)): inp = constant_op.constant(10.0, name='W1') self.assertAllEqual(inp.eval(), 10.0) def testSessionInterOpThreadPool(self): config_pb = config_pb2.ConfigProto() pool = config_pb.session_inter_op_thread_pool.add() with session.Session(config=config_pb) as s: inp = constant_op.constant(10.0, name='W1') results = s.run([inp]) self.assertAllEqual([10.0], results) pool = config_pb.session_inter_op_thread_pool.add() pool.num_threads = 1 with session.Session(config=config_pb) as s: inp = constant_op.constant(20.0, name='W2') results = s.run([inp]) self.assertAllEqual([20.0], results) pool = config_pb.session_inter_op_thread_pool.add() pool.num_threads = 1 pool.global_name = 't1' run_options = config_pb2.RunOptions() run_options.inter_op_thread_pool = ( len(config_pb.session_inter_op_thread_pool) - 1) with session.Session(config=config_pb) as s: inp = constant_op.constant(30.0, name='W2') results = s.run([inp], options=run_options) self.assertAllEqual([30.0], results) def testErrorsReported(self): with session.Session() as s: constant_op.constant(10.0, name='W1') with self.assertRaises(ValueError): s.run('foo:0') def testErrorPayload(self): with session.Session(): a = array_ops.placeholder(dtypes.float32) with self.assertRaisesOpError(lambda e: e.op == a.op): a.eval() def testErrorCodeWithNoNodeDef(self): with session.Session() as s: a = array_ops.placeholder(dtypes.float32, shape=[]) b = array_ops.placeholder(dtypes.float32, shape=[]) r1 = math_ops.add(a, b) def exc_predicate(e): return (e.op is None and e.node_def is None and e.error_code == error_codes_pb2.INVALID_ARGUMENT) with 
self.assertRaisesOpError(exc_predicate): # Run with a bogus handle. s.partial_run('foo', r1, feed_dict={a: 1, b: 2}) def testErrorBasedOn(self): with session.Session() as sess: a = constant_op.constant(0.0, shape=[2, 3]) # NOTE(mrry): The original_op is nonsense, but used here to test that the # errors are reported correctly. with sess.graph._original_op(a.op): b = array_ops.identity(a, name='id') with sess.graph._original_op(b.op): c = array_ops.placeholder(dtypes.float32) def exc_predicate(e): return (e.op == c.op and e.op._original_op == b.op and e.op._original_op._original_op == a.op) with self.assertRaisesOpError(exc_predicate): c.eval() def testFetchNone(self): with session.Session() as s: a = constant_op.constant(1.0) with self.assertRaises(TypeError): s.run(None) with self.assertRaises(TypeError): s.run([None]) with self.assertRaises(TypeError): s.run({'b': None}) with self.assertRaises(TypeError): s.run({'a': a, 'b': None}) def testFetchSingleton(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) tensor_runner = sess.make_callable(a) res = tensor_runner() self.assertEqual(42.0, res) op_runner = sess.make_callable(a.op) res = op_runner() self.assertEqual(None, res) def testFetchSingletonByName(self): with session.Session() as sess: a = constant_op.constant(42.0) res = sess.run(a.name) self.assertEqual(42.0, res) res = sess.run(a.op) # An op, not a tensor. self.assertEqual(None, res) def testFetchList(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) v = variables.Variable([54.0]) assign = v.assign([63.0]) res = sess.run([a, b, c, a.name, assign.op]) self.assertTrue(isinstance(res, list)) self.assertEqual([42.0, None, 44.0, 42.0, None], res) list_runner = sess.make_callable([a, b, c, a.name, assign.op]) res = list_runner() self.assertTrue(isinstance(res, list)) self.assertEqual([42.0, None, 44.0, 42.0, None], res) def testFetchTuple(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run((a, b, c, a.name)) self.assertTrue(isinstance(res, tuple)) self.assertEqual((42.0, None, 44.0, 42.0), res) tuple_runner = sess.make_callable((a, b, c, a.name)) res = tuple_runner() self.assertTrue(isinstance(res, tuple)) self.assertEqual((42.0, None, 44.0, 42.0), res) def testFetchNamedTuple(self): # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) # pylint: enable=invalid-name with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(ABC(a, b, c)) self.assertTrue(isinstance(res, ABC)) self.assertEqual(42.0, res.a) self.assertEqual(None, res.b) self.assertEqual(44.0, res.c) namedtuple_runner = sess.make_callable(ABC(a, b, c)) res = namedtuple_runner() self.assertTrue(isinstance(res, ABC)) self.assertEqual(42.0, res.a) self.assertEqual(None, res.b) self.assertEqual(44.0, res.c) def testFetchDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. 
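      # NOTE: fetching an Operation (rather than a Tensor) yields None in the
      # corresponding slot of the result structure, as asserted below.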
c = constant_op.constant(44.0) res = sess.run({'a': a, 'b': b, 'c': c}) self.assertTrue(isinstance(res, dict)) self.assertEqual(42.0, res['a']) self.assertEqual(None, res['b']) self.assertEqual(44.0, res['c']) def testFetchOrderedDict(self): with session.Session() as sess: a = constant_op.constant(42.0) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(44.0) res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)])) self.assertTrue(isinstance(res, collections.OrderedDict)) self.assertEqual([3, 2, 1], list(res.keys())) self.assertEqual(42.0, res[3]) self.assertEqual(None, res[2]) self.assertEqual(44.0, res[1]) @test_util.run_v1_only('b/120545219') def testFetchAttrs(self): if attr is None: self.skipTest('attr module is unavailable.') @attr.s class SampleAttr(object): field1 = attr.ib() field2 = attr.ib() val1 = np.array([1.2, 3.4, 5.6]) val2 = np.array([[1, 2], [4, 3]]) val3 = np.array([10, 20, 30]) t1 = constant_op.constant(val1) t2 = constant_op.constant(val2) sample = SampleAttr(t1, t2) with session.Session() as sess: result = sess.run(sample) self.assertIsInstance(result, SampleAttr) self.assertAllEqual(val1, result.field1) self.assertAllEqual(val2, result.field2) result = sess.run(sample, feed_dict={sample.field1: val3}) self.assertIsInstance(result, SampleAttr) self.assertAllEqual(val3, result.field1) self.assertAllEqual(val2, result.field2) @test_util.run_v1_only('b/120545219') def testFetchNestedAttrs(self): if attr is None: self.skipTest('attr module is unavailable.') @attr.s class SampleAttr(object): field0 = attr.ib() field1 = attr.ib() v1 = 10 v2 = 20 v3 = np.float32(1.2) v4 = np.float32(3.4) v5 = np.float64(100.001) v6 = np.float64(-23.451) arr1 = np.array([1.2, 6.7, 3.4]) arr2 = np.array([7, 11, 3]) sample = SampleAttr( SampleAttr( SampleAttr(constant_op.constant(v1), constant_op.constant(v2)), SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))), {'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)), 'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]}) with session.Session() as sess: result = sess.run(sample) self.assertIsInstance(result, SampleAttr) self.assertIsInstance(result.field0, SampleAttr) self.assertIsInstance(result.field0.field0, SampleAttr) self.assertIsInstance(result.field0.field1, SampleAttr) self.assertIsInstance(result.field0.field1.field0, np.ndarray) self.assertAllEqual(arr1, result.field0.field1.field0) self.assertIsInstance(result.field0.field1.field1, np.ndarray) self.assertAllEqual(arr2, result.field0.field1.field1) self.assertIsInstance(result.field1, dict) self.assertIn('A', result.field1) self.assertIn('B', result.field1) self.assertIsInstance(result.field1['A'], SampleAttr) self.assertAllEqual( [v3, v4], [result.field1['A'].field0, result.field1['A'].field1]) self.assertIsInstance(result.field1['B'], list) self.assertEqual(1, len(result.field1['B'])) self.assertIsInstance(result.field1['B'][0], SampleAttr) self.assertAllEqual( [v5, v6], [result.field1['B'][0].field0, result.field1['B'][0].field1]) def testFetchNestingEmptyOneLevel(self): with session.Session() as sess: a_val = 11.0 a = constant_op.constant(a_val) res = sess.run([[], tuple(), {}]) self.assertTrue(isinstance(res, list)) self.assertEquals(3, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) res = sess.run([[], tuple(), 
{}, a]) self.assertTrue(isinstance(res, list)) self.assertEquals(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(0, len(res[0])) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(0, len(res[1])) self.assertTrue(isinstance(res[2], dict)) self.assertEqual(0, len(res[2])) self.assertEqual(a_val, res[3]) def testFetchNestingOneLevel(self): with session.Session() as sess: # pylint: disable=invalid-name ABC = collections.namedtuple('ABC', ['a', 'b', 'c']) DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g']) # pylint: enable=invalid-name a_val = 42.0 b_val = None c_val = 44.0 a = constant_op.constant(a_val) b = control_flow_ops.no_op() # An op, not a tensor. c = constant_op.constant(c_val) # List of lists, tuples, namedtuple, and dict res = sess.run([[a, b, c], (a, b, c), ABC(a=a, b=b, c=c), { 'a': a.name, 'c': c, 'b': b }]) self.assertTrue(isinstance(res, list)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Tuple of lists, tuples, namedtuple, and dict res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), { 'a': a, 'c': c, 'b': b })) self.assertTrue(isinstance(res, tuple)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res[0], list)) self.assertEqual(3, len(res[0])) self.assertEqual(a_val, res[0][0]) self.assertEqual(b_val, res[0][1]) self.assertEqual(c_val, res[0][2]) self.assertTrue(isinstance(res[1], tuple)) self.assertEqual(3, len(res[1])) self.assertEqual(a_val, res[1][0]) self.assertEqual(b_val, res[1][1]) self.assertEqual(c_val, res[1][2]) self.assertTrue(isinstance(res[2], ABC)) self.assertEqual(a_val, res[2].a) self.assertEqual(b_val, res[2].b) self.assertEqual(c_val, res[2].c) self.assertTrue(isinstance(res[3], dict)) self.assertEqual(3, len(res[3])) self.assertEqual(a_val, res[3]['a']) self.assertEqual(b_val, res[3]['b']) self.assertEqual(c_val, res[3]['c']) # Namedtuple of lists, tuples, namedtuples, and dict res = sess.run( DEFG( d=[a, b, c], e=(a, b, c), f=ABC(a=a.name, b=b, c=c), g={ 'a': a, 'c': c, 'b': b })) self.assertTrue(isinstance(res, DEFG)) self.assertTrue(isinstance(res.d, list)) self.assertEqual(3, len(res.d)) self.assertEqual(a_val, res.d[0]) self.assertEqual(b_val, res.d[1]) self.assertEqual(c_val, res.d[2]) self.assertTrue(isinstance(res.e, tuple)) self.assertEqual(3, len(res.e)) self.assertEqual(a_val, res.e[0]) self.assertEqual(b_val, res.e[1]) self.assertEqual(c_val, res.e[2]) self.assertTrue(isinstance(res.f, ABC)) self.assertEqual(a_val, res.f.a) self.assertEqual(b_val, res.f.b) self.assertEqual(c_val, res.f.c) self.assertTrue(isinstance(res.g, dict)) self.assertEqual(3, len(res.g)) self.assertEqual(a_val, res.g['a']) self.assertEqual(b_val, res.g['b']) self.assertEqual(c_val, res.g['c']) # Dict of lists, tuples, namedtuples, and dict res = sess.run({ 'd': [a, b, c], 'e': (a, b, c), 'f': ABC(a=a, b=b, c=c), 'g': { 'a': a.name, 'c': c, 'b': b 
} }) self.assertTrue(isinstance(res, dict)) self.assertEqual(4, len(res)) self.assertTrue(isinstance(res['d'], list)) self.assertEqual(3, len(res['d'])) self.assertEqual(a_val, res['d'][0]) self.assertEqual(b_val, res['d'][1]) self.assertEqual(c_val, res['d'][2]) self.assertTrue(isinstance(res['e'], tuple)) self.assertEqual(3, len(res['e'])) self.assertEqual(a_val, res['e'][0]) self.assertEqual(b_val, res['e'][1]) self.assertEqual(c_val, res['e'][2]) self.assertTrue(isinstance(res['f'], ABC)) self.assertEqual(a_val, res['f'].a) self.assertEqual(b_val, res['f'].b) self.assertEqual(c_val, res['f'].c) self.assertTrue(isinstance(res['g'], dict)) self.assertEqual(3, len(res['g'])) self.assertEqual(a_val, res['g']['a']) self.assertEqual(b_val, res['g']['b']) self.assertEqual(c_val, res['g']['c']) def testFetchTensorObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) results_with_list = s.run([c]) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0]) results_with_single = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single) results_with_get = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get) a_val, b_val = s.run([a, b]) # Test multiple fetches. self.assertAllEqual([[1.0, 1.0]], a_val) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val) results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]}) self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_dict['b']) self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0]) self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1]) # Test nested structures results_with_nested_list = s.run([[[a, b], b], a, [a, b]]) self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0]) self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], results_with_nested_list[0][0][1]) self.assertAllEqual(results_with_nested_list[0][0][0], results_with_nested_list[1]) self.assertAllEqual(results_with_nested_list[1], results_with_nested_list[2][0]) self.assertAllEqual(results_with_nested_list[0][0][1], results_with_nested_list[0][1]) self.assertAllEqual(results_with_nested_list[0][1], results_with_nested_list[2][1]) def testFetchScalar(self): with session.Session() as s: for scalar in np.int32, np.int64, np.float16, np.float32, np.float64: x = scalar(7) y = scalar(8) tf_x = constant_op.constant(x, shape=[]) tf_y = constant_op.constant(y) tf_xy = math_ops.add(tf_x, tf_y) # Single fetch xy = s.run(tf_xy) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # List fetch xy, = s.run([tf_xy]) self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Dict fetch xy = s.run({'xy': tf_xy})['xy'] self.assertEqual(scalar, type(xy)) self.assertEqual(x + y, xy) # Nested list fetch xy = s.run([[[tf_xy]], tf_xy, [tf_xy]]) self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]]) self.assertEqual(scalar, type(xy[0][0][0])) self.assertEqual(scalar, type(xy[1])) self.assertEqual(scalar, type(xy[2][0])) def testFetchOperationObject(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) v = variables.Variable(a, name='testFetchOperationObject_v') s.run(v.initializer) v_val = s.run(v) self.assertAllEqual([[1.0, 1.0]], v_val) def testFetchSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = 
np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( constant_op.constant(indices), constant_op.constant(values), constant_op.constant(shape)) # Single fetch, use as tuple sp_out = s.run(sp) indices_out, values_out, shape_out = sp_out self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Single fetch, use as SparseTensorValue sp_out = s.run(sp) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Tuple fetch, use as tuple indices_out, values_out, shape_out = s.run(sp) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as tuple (indices_out, values_out, shape_out), = s.run([sp]) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # List fetch, use as SparseTensorValue sp_out, = s.run([sp]) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Dict fetch (single value), use as tuple indices_out, values_out, shape_out = s.run({'sp': sp})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch (list value), use as tuple (indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp'] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Dict fetch, use as SparseTensorValue sp_out = s.run({'sp': sp})['sp'] self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Nested list fetch use as tuple sp_out = s.run([[[sp]], sp]) indices_out, values_out, shape_out = sp_out[0][0][0] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) indices_out, values_out, shape_out = sp_out[1] self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Nested list fetch, use as SparseTensorValue sp_out = s.run([[[sp]], sp]) self.assertAllEqual(sp_out[0][0][0].indices, indices) self.assertAllEqual(sp_out[0][0][0].values, values) self.assertAllEqual(sp_out[0][0][0].dense_shape, shape) self.assertAllEqual(sp_out[1].indices, indices) self.assertAllEqual(sp_out[1].values, values) self.assertAllEqual(sp_out[1].dense_shape, shape) def testFeedSparseTensor(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = sparse_tensor.SparseTensor( array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(3,)), ) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with tuple, fetch sp directly sp_out = s.run(sp, {sp: (indices, values, shape)}) 
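      # The direct fetch of `sp` comes back as a SparseTensorValue namedtuple,
      # so its components can be checked by attribute.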
self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) # Feed SparseTensorValue and fetch sp directly. sp_out = s.run(sp, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp_out.indices, indices) self.assertAllEqual(sp_out.values, values) self.assertAllEqual(sp_out.dense_shape, shape) def testFeedSparsePlaceholder(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderPartialShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder( shape=[None, 9, 2], dtype=np.float32, name='placeholder1') sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) # Feed with SparseTensorValue, fetch SparseTensorValue sp2_out = s.run(sp2, { sp: sparse_tensor.SparseTensorValue(indices, values, shape) }) self.assertAllEqual(sp2_out.indices, indices) 
self.assertAllEqual(sp2_out.values, values) self.assertAllEqual(sp2_out.dense_shape, shape) def testFeedSparsePlaceholderConstantShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) shape = np.array([7, 9, 2]).astype(np.int64) sp = array_ops.sparse_placeholder( dtype=np.float32, shape=shape, name='placeholder1') self.assertAllEqual(sp.dense_shape.eval(session=s), shape) self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape) sp_indices = array_ops.identity(sp.indices) sp_values = array_ops.identity(sp.values) sp_shape = array_ops.identity(sp.dense_shape) # Feed with tuple indices_out, values_out, shape_out = s.run( [sp_indices, sp_values, sp_shape], { sp: (indices, values) }) self.assertAllEqual(indices_out, indices) self.assertAllEqual(values_out, values) self.assertAllEqual(shape_out, shape) def testFetchIndexedSlices(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), constant_op.constant(dense_shape)) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlices(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = np.array([7, 9, 2]).astype(np.int64) ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), array_ops.placeholder(dtype=np.int64, shape=(3,)), ) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind_dense_shape = array_ops.identity(ind.dense_shape) ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape) # Feed with tuple values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], { ind: (values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Feed with IndexedSlicesValue values_out, indices_out, dense_shape_out = s.run( [ind_values, ind_indices, ind_dense_shape], { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, 
dense_shape) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testFetchIndexedSlicesWithoutDenseShape(self): with session.Session() as s: indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) values = np.array([1.0, 2.0]).astype(np.float32) dense_shape = None ind = ops.IndexedSlices( constant_op.constant(values), constant_op.constant(indices), None) # Single fetch, use as tuple ind_out = s.run(ind) values_out, indices_out, dense_shape_out = ind_out self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # Single fetch, use as IndexedSlicesValue ind_out = s.run(ind) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) # Tuple fetch, use as tuple values_out, indices_out, dense_shape_out = s.run(ind) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as tuple (values_out, indices_out, dense_shape_out), = s.run([ind]) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) self.assertAllEqual(dense_shape_out, dense_shape) # List fetch, use as IndexedSlicesValue ind_out, = s.run([ind]) self.assertAllEqual(ind_out.values, values) self.assertAllEqual(ind_out.indices, indices) self.assertAllEqual(ind_out.dense_shape, dense_shape) def testFeedIndexedSlicesWithoutDenseShape(self): with session.Session() as s: values = np.array([1.0, 2.0]).astype(np.float32) indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64) dense_shape = None ind = ops.IndexedSlices( array_ops.placeholder(dtype=np.float32, shape=(2,)), array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None) ind_values = array_ops.identity(ind.values) ind_indices = array_ops.identity(ind.indices) ind2 = ops.IndexedSlices(ind_values, ind_indices) # Feed with tuple values_out, indices_out = s.run([ind_values, ind_indices], { ind: (values, indices) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue values_out, indices_out = s.run([ind_values, ind_indices], { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(values_out, values) self.assertAllEqual(indices_out, indices) # Feed with IndexedSlicesValue, fetch IndexedSlicesValue ind2_out = s.run(ind2, { ind: ops.IndexedSlicesValue(values, indices, dense_shape) }) self.assertAllEqual(ind2_out.values, values) self.assertAllEqual(ind2_out.indices, indices) self.assertAllEqual(ind2_out.dense_shape, dense_shape) def testExtendWithStatelessOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = s.run(c) self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) # Extend will happen here. 
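      # ("Extend" means that the nodes added to the graph since the last run
      # are shipped to the session before this call executes.)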
e_val = s.run(e) self.assertAllEqual([[24.0]], e_val) def testExtendWithStatefulOperations(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testExtendWithStatefulOperations_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) # Extend will happen here. e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) def testExtendWithGroupBy(self): with session.Session() as s: a = constant_op.constant(1.0, shape=[1, 2]) p = variables.Variable(a, name='testExtendWithGroupBy_p') a_val = a.eval() # Force an Extend after this op. self.assertAllEqual([[1.0, 1.0]], a_val) b = constant_op.constant(2.0, shape=[1, 2]) q = variables.Variable(b, name='testExtendWithGroupBy_q') # Extend will happen here. init = control_flow_ops.group(p.initializer, q.initializer) s.run(init) p_val, q_val = s.run([p, q]) self.assertAllEqual([[1.0, 1.0]], p_val) self.assertAllEqual([[2.0, 2.0]], q_val) def testTensorGetMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) c_val = c.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], c_val) fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]}) self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val) @test_util.run_v1_only('b/120545219') def testOperationRunMethod(self): with session.Session(): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[1, 2], name='b') v = variables.VariableV1(a, a.dtype) assign_a_to_v = state_ops.assign(v, a) assign_a_to_v.eval() v_val = v.eval() self.assertAllEqual([[1.0, 1.0]], v_val) assign_b_to_v = state_ops.assign(v, b) assign_b_to_v.eval() v_val = v.eval() self.assertAllEqual([[2.0, 2.0]], v_val) assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]}) v_val = v.eval() self.assertAllEqual([[3.0, 3.0]], v_val) def testDefaultGraph(self): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) self.assertEqual(ops.get_default_graph(), a.graph) self.assertEqual(ops.get_default_graph(), b.graph) c = math_ops.matmul(a, b) v = variables.Variable(c, name='testDefaultGraph_v') v.initializer.run() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def _testDefaultGraphInThread(self, constructed_event, continue_event, i): with session.Session() as s: self.assertEqual(ops.get_default_graph(), s.graph) a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) v = variables.Variable(c, name='var_%d' % i) # Block here until all threads have constructed their graph. 
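      # (Each thread signals that its graph is built, then waits for the main
      # thread's go-ahead before running.)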
constructed_event.set() continue_event.wait() assign_c_to_v = state_ops.assign(v, c) v.initializer.run() assign_c_to_v.eval() v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) d = constant_op.constant(3.0, shape=[2, 3]) e = math_ops.matmul(a, d) assign_e_to_v = state_ops.assign(v, e) e_val = e.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], e_val) v_val = v.eval() self.assertAllEqual([[4.0, 4.0, 4.0]], v_val) s.run(assign_e_to_v) v_val = v.eval() self.assertAllEqual([[6.0, 6.0, 6.0]], v_val) self.assertEqual(ops.get_default_graph(), s.graph) def testDefaultGraphWithThreads(self): # Fork ten threads that use their thread-local default graph. threads = [] constructed_events = [threading.Event() for _ in range(10)] continue_event = threading.Event() for i, constructed_event in enumerate(constructed_events): t = self.checkedThread( target=self._testDefaultGraphInThread, args=(constructed_event, continue_event, i)) threads.append(t) for t in threads: t.start() for constructed_event in constructed_events: constructed_event.wait() continue_event.set() for t in threads: t.join() def testParallelRun(self): with session.Session() as sess: c = constant_op.constant(5.0) ev = threading.Event() def run_step(): ev.wait() val = c.eval(session=sess) self.assertEqual(val, 5.0) threads = [self.checkedThread(target=run_step) for _ in range(100)] for t in threads: t.start() ev.set() for t in threads: t.join() @staticmethod def _build_graph(): time.sleep(random.random() * 0.1) # Do some graph construction. Try to exercise non-trivial paths. graph = ops.get_default_graph() gdef = None for _ in range(10): x = array_ops.placeholder(dtype=dtypes.float32) with ops.colocate_with(x): y = array_ops.placeholder(dtype=dtypes.float32) with ops.device('/cpu:0'): z = control_flow_ops.while_loop( lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y]) with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}): gradients_impl.gradients(z, [x, y]) if gdef is None: gdef = graph.as_graph_def() else: importer.import_graph_def(gdef, name='import') @test_util.run_v1_only('b/120545219') def testParallelRunAndSingleBuild(self): with session.Session() as sess: c = constant_op.constant(5.0) stop = threading.Event() def run_loop(): while not stop.is_set(): time.sleep(random.random() * 0.1) self.assertEqual(sess.run(c), 5.0) threads = [self.checkedThread(target=run_loop) for _ in range(10)] for t in threads: t.start() SessionTest._build_graph() stop.set() for t in threads: t.join() @test_util.run_v1_only('b/120545219') def testParallelRunAndParallelBuild(self): with session.Session() as sess: c = constant_op.constant(5.0) stop = threading.Event() def run_loop(): while not stop.is_set(): time.sleep(random.random() * 0.1) self.assertEqual(sess.run(c), 5.0) run_threads = [self.checkedThread(target=run_loop) for _ in range(10)] for t in run_threads: t.start() build_threads = [self.checkedThread(target=SessionTest._build_graph) for _ in range(10)] for t in build_threads: t.start() for t in build_threads: t.join() # Let the run_threads run until the build threads are finished. 
stop.set() for t in run_threads: t.join() def testRunFeedDict(self): with session.Session() as s: x = array_ops.zeros([2]) y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)}) self.assertAllEqual(y, 2 * np.ones(2)) y = s.run(2 * x, feed_dict={x: [1, 1]}) assert (y == 2 * np.ones(2)).all() # Test nested tuple keys z = (((array_ops.zeros([2]),),), array_ops.zeros([2]), (array_ops.zeros([2]),)) result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2] values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),)) result_value = s.run(result, feed_dict={z: values}) self.assertAllEqual(result_value[0], 2 * np.ones(2)) self.assertAllEqual(result_value[1], 2 * np.array([2, 2])) self.assertAllEqual(result_value[2], 2 * np.array([3, 3])) def testGraphDef(self): with session.Session() as sess: self.assertProtoEquals('versions { producer: %d min_consumer: %d }' % (versions.GRAPH_DEF_VERSION, versions.GRAPH_DEF_VERSION_MIN_CONSUMER), sess.graph_def) c = constant_op.constant(5.0, name='c') self.assertEquals(len(sess.graph_def.node), 1) d = constant_op.constant(6.0, name='d') self.assertEquals(len(sess.graph_def.node), 2) self.assertAllEqual(c.eval(), 5.0) self.assertAllEqual(d.eval(), 6.0) e = constant_op.constant(7.0, name='e') self.assertEquals(len(sess.graph_def.node), 3) self.assertAllEqual(e.eval(), 7.0) def testUseAfterClose(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): sess.run(c) def testUseAfterCloseConcurrent(self): with session.Session() as sess: c = constant_op.constant(5.0) self.assertAllEqual(sess.run(c), 5.0) def update_thread(): with self.assertRaisesWithPredicateMatch( RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)): while True: sess.run(c) t = threading.Thread(target=update_thread) t.start() time.sleep(0.1) sess.close() t.join() def testUseEmptyGraph(self): with session.Session() as sess: with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run([]) with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run(()) with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'): sess.run({}) @test_util.run_v1_only('b/120545219') def testNotEntered(self): # pylint: disable=protected-access self.assertEqual(ops._default_session_stack.get_default(), None) # pylint: enable=protected-access with ops.device('/cpu:0'): sess = session.Session() c_1 = constant_op.constant(5.0) with sess.graph.as_default(): c_2 = constant_op.constant(5.0) self.assertEqual(c_1.graph, c_2.graph) self.assertEqual(sess.run(c_2), 5.0) with self.assertRaisesWithPredicateMatch( ValueError, lambda e: 'No default session is registered.' in str(e)): c_2.eval() @test_util.run_v1_only('b/120545219') def testInteractive(self): with ops.device('/cpu:0'): sess = session.InteractiveSession() a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(2.0, shape=[2, 3]) c = math_ops.matmul(a, b) self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval()) d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1]) e = math_ops.matmul(c, d) self.assertAllEqual([[24.0]], e.eval()) sess.close() @test_util.run_v1_only('b/120545219') def testMultipleInteractiveSessionsWarning(self): # Reinitialize the global state to ensure that the expected warnings will # be emitted. 
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access sess = session.InteractiveSession() sess.run(constant_op.constant(4.0)) # Run so that the session is "opened". sess.close() # Opening and closing interactive sessions serially should not warn. with warnings.catch_warnings(record=True) as w: sess = session.InteractiveSession() sess.close() self.assertEqual(0, len(w)) with warnings.catch_warnings(record=True) as w: sess = session.InteractiveSession() self.assertEqual(0, len(w)) with warnings.catch_warnings(record=True) as w: sess2 = session.InteractiveSession() self.assertEqual(1, len(w)) self.assertTrue('An interactive session is already active. This can cause ' 'out-of-memory errors in some cases. You must explicitly ' 'call `InteractiveSession.close()` to release resources ' 'held by the other session(s).' in str(w[0].message)) sess2.close() sess.close() @test_util.run_v1_only('b/120545219') def testInteractivePlacePrunedGraph(self): sess = session.InteractiveSession() # Build a graph that has a bad op in it (no kernel). # # This test currently does not link in any GPU kernels, # which is why placing this is invalid. If at some point # GPU kernels are added to this test, some other different # op / device combo should be chosen. with ops.device('/device:GPU:0'): a = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(1.0, shape=[1, 2]) # Only run the valid op, this should work. b.eval() with self.assertRaises(errors.InvalidArgumentError): a.eval() sess.close() @test_util.run_v1_only('b/120545219') def testDefaultSessionPlacePrunedGraph(self): sess = session.Session() # Build a graph that has a bad op in it (no kernel). # # This test currently does not link in any GPU kernels, # which is why placing this is invalid. If at some point # GPU kernels are added to this test, some other different # op / device combo should be chosen. with ops.device('/device:GPU:0'): _ = constant_op.constant(1.0, shape=[1, 2]) b = constant_op.constant(1.0, shape=[1, 2]) with self.assertRaises(errors.InvalidArgumentError): # Even though we don't run the bad op, we place the entire # graph, which should fail with a non-interactive session. 
sess.run(b)
    sess.close()

  def testSharedGraph(self):
    with ops.Graph().as_default() as g, ops.device('/cpu:0'):
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[2, 3])
      c = math_ops.matmul(a, b)

    with session.Session(graph=g) as sess1:
      with session.Session(graph=g) as sess2:
        self.assertAllEqual(sess1.run(c), sess2.run(c))

  def testDuplicatedInputs(self):
    with session.Session() as sess:
      a = constant_op.constant(1.0, shape=[1, 2])
      b = constant_op.constant(2.0, shape=[1, 3])
      a_val, b_val, a2_val = sess.run([a, b, a])
      self.assertAllEqual(a_val, [[1.0, 1.0]])
      self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
      self.assertAllEqual(a2_val, [[1.0, 1.0]])

  def testFeedAndFetch(self):
    with session.Session() as sess:
      for dtype in [
          dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
          dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
          dtypes.complex64, dtypes.complex128
      ]:
        for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
          np_dtype = dtype.as_numpy_dtype
          feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
          out_t = array_ops.identity(feed_t)
          np_array = np.random.randint(-10, 10, shape)
          if dtype == dtypes.bool:
            np_array = np_array > 0
          elif dtype == dtypes.complex64:
            np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
          else:
            np_array = np_array.astype(np_dtype)
          self.assertAllEqual(np_array,
                              sess.run(out_t, feed_dict={
                                  feed_t: np_array
                              }))
          # Check that we can also get the feed back.
          self.assertAllEqual(np_array,
                              sess.run(feed_t, feed_dict={
                                  feed_t: np_array
                              }))
          # Also check that we can get both back.
          out_v, feed_v = sess.run(
              [out_t, feed_t], feed_dict={
                  feed_t: np_array
              })
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)
          feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
          out_v, feed_v = feed_fetch_runner(np_array)
          self.assertAllEqual(np_array, out_v)
          self.assertAllEqual(np_array, feed_v)

  def testMakeCallableOnTensorWithRunOptions(self):
    with session.Session() as sess:
      a = constant_op.constant(42.0)
      tensor_runner = sess.make_callable(a, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      res = tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(42.0, res)
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testMakeCallableOnOperationWithRunOptions(self):
    with session.Session() as sess:
      a = variables.Variable(42.0)
      b = state_ops.assign_add(a, 1.0)
      sess.run(a.initializer)
      tensor_runner = sess.make_callable(b.op, accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      tensor_runner(options=run_options, run_metadata=run_metadata)
      self.assertEqual(43.0, sess.run(a))
      self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)

  def testMakeCallableWithFeedListAndRunOptions(self):
    with session.Session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      a = math_ops.add(ph, 1.0)
      tensor_runner = sess.make_callable(
          a, feed_list=[ph.name], accept_options=True)
      run_options = config_pb2.RunOptions(
          trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
      self.assertAllClose(42.0, tensor_runner(
          41.0, options=run_options,
run_metadata=run_metadata)) self.assertGreater(len(run_metadata.step_stats.dev_stats), 0) def testOptimizedMakeCallable(self): with session.Session() as sess: ph = array_ops.placeholder(dtypes.float32) a = math_ops.add(ph, 1.0) callable_opts = config_pb2.CallableOptions() callable_opts.feed.append(ph.name) callable_opts.fetch.append(a.name) for _ in range(3): callable_fn = sess._make_callable_from_options(callable_opts) for _ in range(5): self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32))) def testOptimizedMakeCallableWithRunMetadata(self): with session.Session() as sess: ph = array_ops.placeholder(dtypes.float32) a = math_ops.add(ph, 1.0) callable_opts = config_pb2.CallableOptions() callable_opts.feed.append(ph.name) callable_opts.fetch.append(a.name) callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE callable_fn = sess._make_callable_from_options(callable_opts) run_metadata = config_pb2.RunMetadata() self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32), run_metadata=run_metadata)) self.assertGreater(len(run_metadata.step_stats.dev_stats), 0) def testFeedError(self): with session.Session() as sess: feed_t = array_ops.placeholder(dtype=dtypes.float32) out_t = array_ops.identity(feed_t) feed_val = constant_op.constant(5.0) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): sess.run(out_t, feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.eval(feed_dict={feed_t: feed_val}) with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'): out_t.op.run(feed_dict={feed_t: feed_val}) def testFeedPrecisionLossError(self): with session.Session() as sess: largest_int64 = np.iinfo(np.int64).max feed_int_implicit_int32 = constant_op.constant(1) feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32) out_t = constant_op.constant(1.0) with self.assertRaisesRegexp(TypeError, 'is not compatible with Tensor type'): sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64}) with self.assertRaisesRegexp(TypeError, 'is not compatible with Tensor type'): sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64}) def testStringFetch(self): with session.Session(): for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array( [compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) if size > 0 else [] c = constant_op.constant(c_list) self.assertAllEqual(c.eval(), c_list) def testStringFeed(self): with session.Session() as sess: for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]: size = 1 for s in shape: size *= s c_list = np.array( [compat.as_bytes(str(i)) for i in xrange(size)], dtype=np.object).reshape(shape) feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape) c = array_ops.identity(feed_t) self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list) self.assertAllEqual( sess.run(feed_t, feed_dict={ feed_t: c_list }), c_list) c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list}) self.assertAllEqual(c_v, c_list) self.assertAllEqual(feed_v, c_list) def testStringFeedWithNullCharacters(self): with session.Session(): c_list = [b'\n\x01\x00', b'\n\x00\x01'] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) self.assertEqual(c_list[0], out[0]) self.assertEqual(c_list[1], out[1]) def testStringFeedWithUnicode(self): with session.Session(): c_list = [ 
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode', u'\U0001f60e deal with it' ] feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)]) c = array_ops.identity(feed_t) out = c.eval(feed_dict={feed_t: c_list}) for i in range(len(c_list)): self.assertEqual(c_list[i], out[i].decode('utf-8')) out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)}) for i in range(len(c_list)): self.assertEqual(c_list[i], out[i].decode('utf-8')) def testInvalidTargetFails(self): with self.assertRaisesRegexp( errors.NotFoundError, 'No session factory registered for the given session options'): session.Session('INVALID_TARGET') def testFetchByNameDifferentStringTypes(self): with session.Session() as sess: c = constant_op.constant(42.0, name='c') d = constant_op.constant(43.0, name=u'd') e = constant_op.constant(44.0, name=b'e') f = constant_op.constant(45.0, name=r'f') self.assertTrue(isinstance(c.name, six.text_type)) self.assertTrue(isinstance(d.name, six.text_type)) self.assertTrue(isinstance(e.name, six.text_type)) self.assertTrue(isinstance(f.name, six.text_type)) self.assertEqual(42.0, sess.run('c:0')) self.assertEqual(42.0, sess.run(u'c:0')) self.assertEqual(42.0, sess.run(b'c:0')) self.assertEqual(42.0, sess.run(r'c:0')) self.assertEqual(43.0, sess.run('d:0')) self.assertEqual(43.0, sess.run(u'd:0')) self.assertEqual(43.0, sess.run(b'd:0')) self.assertEqual(43.0, sess.run(r'd:0')) self.assertEqual(44.0, sess.run('e:0')) self.assertEqual(44.0, sess.run(u'e:0')) self.assertEqual(44.0, sess.run(b'e:0')) self.assertEqual(44.0, sess.run(r'e:0')) self.assertEqual(45.0, sess.run('f:0')) self.assertEqual(45.0, sess.run(u'f:0')) self.assertEqual(45.0, sess.run(b'f:0')) self.assertEqual(45.0, sess.run(r'f:0')) def testIncorrectGraph(self): with ops.Graph().as_default() as g_1: c_1 = constant_op.constant(1.0, name='c') with ops.Graph().as_default() as g_2: c_2 = constant_op.constant(2.0, name='c') self.assertEqual('c', c_1.op.name) self.assertEqual('c', c_2.op.name) with session.Session(graph=g_1) as sess_1: self.assertEqual(1.0, sess_1.run(c_1)) with self.assertRaises(ValueError): sess_1.run(c_2) with self.assertRaises(ValueError): sess_1.run(c_2.op) with session.Session(graph=g_2) as sess_2: with self.assertRaises(ValueError): sess_2.run(c_1) with self.assertRaises(ValueError): sess_2.run(c_1.op) self.assertEqual(2.0, sess_2.run(c_2)) def testFeedDictKeyException(self): with session.Session() as sess: a = constant_op.constant(1.0, dtypes.float32, name='a') with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'): sess.run(a, feed_dict={'a': [2.0]}) def testPerStepTrace(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: sess.run(constant_op.constant(1.0)) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run(constant_op.constant(1.0), run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run( constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testRunOptionsRunMetadata(self): run_options = config_pb2.RunOptions( trace_level=config_pb2.RunOptions.FULL_TRACE) run_metadata = config_pb2.RunMetadata() with ops.device('/cpu:0'): with session.Session() as sess: # all combinations are valid sess.run(constant_op.constant(1.0), options=None, run_metadata=None) 
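        # Supplying run_metadata without trace-enabling options should leave
        # it empty, as checked below.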
sess.run( constant_op.constant(1.0), options=None, run_metadata=run_metadata) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run( constant_op.constant(1.0), options=run_options, run_metadata=None) self.assertTrue(not run_metadata.HasField('step_stats')) sess.run( constant_op.constant(1.0), options=run_options, run_metadata=run_metadata) self.assertTrue(run_metadata.HasField('step_stats')) self.assertEquals(len(run_metadata.step_stats.dev_stats), 1) def testFeedShapeCompatibility(self): with session.Session() as sess: some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0]) new_shape = constant_op.constant([2, 2]) reshaped_tensor = array_ops.reshape(some_tensor, new_shape) with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'): sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]}) with self.assertRaisesRegexp( errors.InvalidArgumentError, 'Input to reshape is a tensor with 4 values, ' 'but the requested shape has 21'): sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]}) def testInferShapesFalse(self): with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant([[1, 2]]) sess = session.Session() self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr) # Avoid lint error regarding 'unused' var a. self.assertTrue(a == a) def testInferShapesTrue(self): config_pb = config_pb2.ConfigProto( graph_options=config_pb2.GraphOptions(infer_shapes=True)) with ops.Graph().as_default(), ops.device('/cpu:0'): a = constant_op.constant([[1, 2]]) sess = session.Session(config=config_pb) self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr) # Avoid lint error regarding 'unused' var a. self.assertTrue(a == a) def testBuildCostModel(self): run_options = config_pb2.RunOptions() config_pb = config_pb2.ConfigProto( allow_soft_placement=True, graph_options=config_pb2.GraphOptions(build_cost_model=100)) with session.Session(config=config_pb) as sess: with ops.device('/device:GPU:0'): a = array_ops.placeholder(dtypes.float32, shape=[]) b = math_ops.add(a, a) c = array_ops.identity(b) d = math_ops.multiply(c, c) for step in xrange(120): run_metadata = config_pb2.RunMetadata() sess.run( d, feed_dict={a: 1.0}, options=run_options, run_metadata=run_metadata) if step == 99: self.assertTrue(run_metadata.HasField('cost_graph')) else: self.assertFalse(run_metadata.HasField('cost_graph')) def runTestOutputPartitionGraphs(self, sess): run_options = config_pb2.RunOptions(output_partition_graphs=True) a = constant_op.constant(1) run_metadata = config_pb2.RunMetadata() sess.run(a, options=run_options, run_metadata=run_metadata) self.assertGreater(len(run_metadata.partition_graphs), 0) sess.run(a, run_metadata=run_metadata) self.assertEqual(len(run_metadata.partition_graphs), 0) @test_util.run_v1_only('b/120545219') def testOutputPartitionGraphsDirect(self): self.runTestOutputPartitionGraphs(session.Session()) @test_util.run_v1_only('b/120545219') def testOutputPartitionGraphsDistributed(self): server = server_lib.Server.create_local_server() self.runTestOutputPartitionGraphs(session.Session(server.target)) def testNonInteractiveSessionNesting(self): sess1 = session.Session() sess1_controller = sess1.as_default() sess1_controller.__enter__() sess2 = session.Session() sess2_controller = sess2.as_default() sess2_controller.__enter__() with self.assertRaisesRegexp(AssertionError, 'Nesting violated'): sess1_controller.__exit__(None, None, None) ops._default_session_stack.reset() def testInteractiveSessionNesting(self): sess1 = 
session.InteractiveSession() sess2 = session.InteractiveSession() del sess1 del sess2 @test_util.run_v1_only('b/120545219') def testAsDefault(self): c = constant_op.constant(37) sess = session.Session() with sess.as_default(): self.assertEqual(37, c.eval()) # Ensure that the session remains valid even when it is not captured. with session.Session().as_default(): self.assertEqual(37, c.eval()) def testReentry(self): sess = session.Session() with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'): with sess: with sess: pass def testInvalidArgument(self): with self.assertRaisesRegexp(TypeError, 'target must be a string'): session.Session(37) with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'): session.Session(config=37) with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'): session.Session(graph=37) @test_util.run_v1_only('b/120545219') def testTimeoutWithShortOperations(self): num_epochs = 5 q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()]) enqueue_op = q.enqueue_many(constant_op.constant([1, 2])) # Use a 10-second timeout, which should be longer than any # non-blocking enqueue_many op. config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000) with session.Session(config=config_pb) as sess: for _ in range(num_epochs): sess.run(enqueue_op) self.assertEqual(sess.run(q.size()), num_epochs * 2) @test_util.run_v1_only('b/120545219') def testRegisterFetchAndFeedConversionFunctions(self): class SquaredTensor(object): def __init__(self, tensor): self.sq = math_ops.square(tensor) fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0]) feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)] feed_fn2 = lambda feed: [feed.sq] session.register_session_run_conversion_functions(SquaredTensor, fetch_fn, feed_fn1, feed_fn2) with self.assertRaises(ValueError): session.register_session_run_conversion_functions(SquaredTensor, fetch_fn, feed_fn1, feed_fn2) with self.cached_session() as sess: np1 = np.array([1.0, 1.5, 2.0, 2.5]) np2 = np.array([3.0, 3.5, 4.0, 4.5]) squared_tensor = SquaredTensor(np2) squared_eval = sess.run(squared_tensor) self.assertAllClose(np2 * np2, squared_eval) squared_eval = sess.run( squared_tensor, feed_dict={ squared_tensor: np1 * np1 }) self.assertAllClose(np1 * np1, squared_eval) partial_run = sess.partial_run_setup([squared_tensor], []) squared_eval = sess.partial_run(partial_run, squared_tensor) self.assertAllClose(np2 * np2, squared_eval) def testDefaultLogDevicePlacement(self): class CaptureStderr(str): """Class to capture stderr from C++ shared library.""" def __enter__(self): self._esc = compat.as_str('\b') self._output = compat.as_str('') self._stderr = sys.stderr self._fd = self._stderr.fileno() self._out_pipe, in_pipe = os.pipe() # Save the original io stream. self._dup_fd = os.dup(self._fd) # Replace the original io stream with in pipe. os.dup2(in_pipe, self._fd) return self def __exit__(self, *args): self._stderr.write(self._esc) self._stderr.flush() self.read() os.close(self._out_pipe) # Restore the original io stream. 
os.dup2(self._dup_fd, self._fd) def read(self): while True: data = os.read(self._out_pipe, 1) if not data or compat.as_str(data) == self._esc: break self._output += compat.as_str(data) def __str__(self): return self._output if context.executing_eagerly(): context.set_log_device_placement(True) with CaptureStderr() as log: a = constant_op.constant(1) b = constant_op.constant(2) c = a + b else: # Passing the config to the server, but not the session should still # result in logging device placement. config_pb = config_pb2.ConfigProto(log_device_placement=True) server = server_lib.Server.create_local_server(config=config_pb) a = constant_op.constant(1) b = constant_op.constant(2) c = a + b with session.Session(server.target) as sess: with CaptureStderr() as log: sess.run(c) # Ensure that we did log device placement. self.assertTrue('/replica:0/task:0/device:CPU:0' in str(log), str(log)) @test_util.run_v1_only('b/120545219') def testLocalMasterSessionTimeout(self): # Test that the timeout passed in a config to the session works correctly. config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000) server = server_lib.Server.create_local_server() q = data_flow_ops.FIFOQueue(1, dtypes.float32) dequeued_t = q.dequeue() with session.Session(server.target, config=config_pb) as sess: # Intentionally do not run any enqueue_ops so that dequeue will block # until operation_timeout_in_ms. with self.assertRaises(errors.DeadlineExceededError): sess.run(dequeued_t) @test_util.run_v1_only('b/120545219') def testDefaultServerTimeout(self): # Test that the default server config timeout gets used when no Session # config is provided. config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000) server = server_lib.Server.create_local_server(config=config_pb) q = data_flow_ops.FIFOQueue(1, dtypes.float32) dequeued_t = q.dequeue() with session.Session(server.target) as sess: # Intentionally do not run any enqueue_ops so that dequeue will block # until operation_timeout_in_ms. with self.assertRaises(errors.DeadlineExceededError): sess.run(dequeued_t) def runTestBuildGraphError(self, sess): # Ensure that errors from building the graph get propagated. 
data = array_ops.placeholder(dtypes.float32, shape=[]) # pylint: disable=protected-access enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False) enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False) # pylint: enable=protected-access res = math_ops.add(enter_1, enter_2) with self.assertRaisesOpError('has inputs from different frames'): sess.run(res, feed_dict={data: 1.0}) @test_util.run_v1_only('b/120545219') def testBuildGraphErrorDirect(self): self.runTestBuildGraphError(session.Session()) @test_util.run_v1_only('b/120545219') def testBuildGraphErrorDist(self): server = server_lib.Server.create_local_server() self.runTestBuildGraphError(session.Session(server.target)) def testDeviceAttributes(self): attrs = session._DeviceAttributes( '/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000) self.assertEqual(1337, attrs.memory_limit_bytes) self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name) self.assertEqual('TYPE', attrs.device_type) self.assertEqual(1000000, attrs.incarnation) str_repr = '%s' % attrs self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr) def testDeviceAttributesCanonicalization(self): attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1', 'TYPE', 1337, 1000000) self.assertEqual(1337, attrs.memory_limit_bytes) self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name) self.assertEqual('TYPE', attrs.device_type) self.assertEqual(1000000, attrs.incarnation) str_repr = '%s' % attrs self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr) def runTestAddFunctionToSession(self, target=''): """Add a function to a session after the graph has already been run.""" @function.Defun(dtypes.float32) def foo(x): return x + 1 x = constant_op.constant(1.0) with session.Session(target=target) as sess: sess.run(x) f = foo(x) result = sess.run(f) self.assertEqual(result, 2.0) @test_util.run_v1_only('b/120545219') def testAddFunctionToSession(self): self.runTestAddFunctionToSession() @test_util.run_v1_only('b/120545219') def testAddFunctionToGrpcSession(self): server = server_lib.Server.create_local_server() self.runTestAddFunctionToSession(server.target) def testOpenAndCloseGrpcSession(self): server = server_lib.Server.create_local_server() with session.Session(server.target): pass def testOpenAndCloseSession(self): with session.Session(): pass @test_util.run_v1_only('b/120545219') def testAutoConvertAndCheckData(self): with self.cached_session() as sess: a = array_ops.placeholder(dtype=dtypes.string) with self.assertRaisesRegexp( TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'): sess.run(a, feed_dict={a: 1}) @test_util.run_v1_only('b/120545219') def testOptimizerOptions(self): config.set_optimizer_experimental_options({'min_graph_nodes': -1}) with ops.Graph().as_default(): sess = session.Session() self.assertEqual( sess._config.graph_options.rewrite_options.min_graph_nodes, -1) if __name__ == '__main__': googletest.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/client/session_test.py
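The session tests above exercise how RunOptions and RunMetadata interact on a run call. As a minimal standalone sketch (not part of the repository; it assumes the TF 1.x tf.compat.v1 API used throughout this branch), passing output_partition_graphs=True populates run_metadata.partition_graphs with one GraphDef per device that executed part of the graph:

# Minimal sketch, assuming a TF 1.x environment (tf.compat.v1); not part
# of the test suite above.
import tensorflow as tf

run_options = tf.compat.v1.RunOptions(output_partition_graphs=True)
run_metadata = tf.compat.v1.RunMetadata()

with tf.compat.v1.Session() as sess:
  a = tf.constant(1.0)
  sess.run(a, options=run_options, run_metadata=run_metadata)
  # One partition graph per device that executed a piece of the graph;
  # without the option, partition_graphs stays empty.
  print(len(run_metadata.partition_graphs))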
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test


class DefFunctionTest(test.TestCase):

  def testCompileFunctionWithXLA(self):

    def fn(x):
      return array_ops.unique(x).y  # Unique is not supported by XLA

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    self.assertAllClose([1, 2, 3], func(inputs))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 'node is not compilable'):
      xla_func(inputs)


if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/def_function_xla_jit_test.py
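For contrast with the unsupported tf.unique case above, here is a hedged sketch of a function XLA can compile. It mirrors the internal-module import style of the test and assumes the same experimental_compile flag available on this branch; the function name is illustrative only:

# Sketch only: dense elementwise math is compilable by XLA, unlike
# array_ops.unique in the test above.
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

ops.enable_eager_execution()

def double_plus_one(x):
  # 2 * x + 1, built from ops XLA supports.
  return math_ops.add(math_ops.multiply(x, 2.0), 1.0)

xla_fn = def_function.function(double_plus_one, experimental_compile=True)
print(xla_fn(constant_op.constant([1.0, 2.0])))  # [3.0, 5.0]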
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for backpropagation using the tape utilities."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

from tensorflow.python import pywrap_tensorflow
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.util import compat


VSpace = collections.namedtuple("VSpace", [
    "aggregate_fn", "num_elements_fn", "zeros_fn", "ones_fn", "graph_shape_fn"
])


def imperative_grad(tape,
                    target,
                    sources,
                    output_gradients=None,
                    sources_raw=None,
                    unconnected_gradients=UnconnectedGradients.NONE):
  """Computes gradients from the imperatively defined tape on top of the stack.

  Works by filtering the tape, computing how many downstream usages there are
  of each tensor and entry, and repeatedly applying backward functions until
  we have gradients for all sources.

  Args:
    tape: the gradient tape which stores the trace.
    target: either a Tensor or list of Tensors to be differentiated.
    sources: list of Tensors for which we want gradients.
    output_gradients: if not None, a list of gradients provided for each
      target, or None if we are to use the target's computed downstream
      gradient.
    sources_raw: if not None, a list of the source python objects from which
      the sources were generated. Should have the same length as sources. Only
      needs to be populated if unconnected_gradients is 'zero'.
    unconnected_gradients: determines the value returned if the target and
      sources are unconnected. When 'none' the value returned is None, whereas
      when 'zero' a zero tensor of the same shape as the sources is returned.

  Returns:
    the gradient with respect to each of the sources.

  Raises:
    ValueError: if the arguments are invalid.
    RuntimeError: if something goes wrong.
  """
  try:
    unconnected_gradients = UnconnectedGradients(unconnected_gradients)
  except ValueError:
    raise ValueError(
        "Unknown value for unconnected_gradients: %r" % unconnected_gradients)

  return pywrap_tensorflow.TFE_Py_TapeGradient(
      tape._tape,  # pylint: disable=protected-access
      target,
      sources,
      output_gradients,
      sources_raw,
      compat.as_str(unconnected_gradients.value))
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/imperative_grad.py
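imperative_grad is where the unconnected_gradients argument is ultimately resolved; its effect is easiest to see through the public tf.GradientTape API, which funnels into this function. A minimal sketch, assuming eager execution is enabled on this 1.15 branch:

# Sketch of the 'none' vs. 'zero' unconnected_gradients behavior via the
# public GradientTape API, which calls into imperative_grad internally.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

x = tf.constant(1.0)
y = tf.constant(2.0)
with tf.GradientTape(persistent=True) as tape:
  tape.watch(x)
  tape.watch(y)
  z = x * x  # z depends on x only; y is unconnected

print(tape.gradient(z, y))  # None under the default 'none'
print(tape.gradient(z, y,
                    unconnected_gradients=tf.UnconnectedGradients.ZERO))
# -> a 0.0 tensor with y's shape under 'zero'
del tape  # release the persistent tape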
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized

from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class DefunCollectionTest(test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      dict(testcase_name='Defun', function_decorator=function.defun),
      dict(
          testcase_name='DefFunction',
          function_decorator=def_function.function))
  def testCollectionValueAccess(self, function_decorator):
    """Read values from graph collections inside of defun."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        x = 2
        y = 5
        ops.add_to_collection('x', x)
        ops.add_to_collection('y', y)

        @function_decorator
        def fn():
          x_const = constant_op.constant(ops.get_collection('x')[0])
          y_const = constant_op.constant(ops.get_collection('y')[0])
          z = math_ops.add(x_const, y_const)
          ops.add_to_collection('z', 7)
          return z

        self.assertEqual(7, int(self.evaluate(fn())))
        self.assertEqual(ops.get_collection('x'), [2])
        self.assertEqual(ops.get_collection('y'), [5])
        self.assertEqual(ops.get_collection('z'), [])

  @parameterized.named_parameters(
      dict(testcase_name='Defun', function_decorator=function.defun),
      dict(
          testcase_name='DefFunction',
          function_decorator=def_function.function))
  def testCollectionVariableValueAccess(self, function_decorator):
    """Read variable value from graph collections inside of defun."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):
        v = resource_variable_ops.ResourceVariable(1.0)

        @function_decorator
        def f():
          return v.read_value()

        self.evaluate(variables.global_variables_initializer())
        self.assertEqual(1.0, float(self.evaluate(f())))
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)

  def testCollectionVariableValueWrite(self):
    """Write variable value inside defun."""
    with ops.Graph().as_default() as g:
      with self.session(graph=g):

        @function.defun
        def f():
          v = resource_variable_ops.ResourceVariable(2.0)
          return v

        _ = f.get_concrete_function()
        self.evaluate(variables.global_variables_initializer())
        self.assertEqual(2.0, float(self.evaluate(f())))
        self.assertEqual(
            len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 1)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={'CPU': 4}))
  test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/function_defun_collection_test.py
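As a usage-level illustration of the collection semantics tested above, here is a minimal sketch using the public tf.compat.v1 API rather than the repository's internal imports: collections are read from the enclosing graph while the function is traced, and writes made inside the traced function stay in the function's own graph rather than the outer one.

# Sketch only; assumes the public TF 1.x API on this branch.
import tensorflow as tf

g = tf.Graph()
with g.as_default(), tf.compat.v1.Session(graph=g) as sess:
  tf.compat.v1.add_to_collection('x', 2)

  @tf.function
  def fn():
    # Read at trace time from the outer graph's collections.
    value = tf.constant(tf.compat.v1.get_collection('x')[0])
    # Writes inside the traced function do not reach the outer graph.
    tf.compat.v1.add_to_collection('z', 7)
    return value

  print(sess.run(fn()))                    # 2
  print(tf.compat.v1.get_collection('z'))  # [] in the outer graph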
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools import itertools import multiprocessing.pool import sys import time import weakref from absl.testing import parameterized import numpy from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import keras from tensorflow.python.autograph.core import ag_ctx from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import cancellation from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import func_graph from tensorflow.python.framework import function as tf_function from tensorflow.python.framework import indexed_slices from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.keras.engine import training as keras_training from tensorflow.python.keras.layers import core from tensorflow.python.keras.optimizer_v2 import adam from tensorflow.python.layers import convolutional from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gen_functional_ops from tensorflow.python.ops import gen_random_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import list_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test from tensorflow.python.training import training_ops from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect def total_function_cache(defined): # pylint: disable=protected-access return 
(set(defined._function_cache.primary) | set(defined._function_cache.arg_relaxed)) # pylint: enable=protected-access class MiniModel(keras_training.Model): """Minimal model for mnist. Useful for testing and debugging on slow TPU simulators. """ def __init__(self): super(MiniModel, self).__init__(name='') self.fc = keras.layers.Dense(1, name='fc', kernel_initializer='ones', bias_initializer='ones') def call(self, inputs, training=True): return self.fc(inputs) class DefunnedMiniModel(MiniModel): @function.defun def call(self, inputs, training=True): return super(DefunnedMiniModel, self).call(inputs, training=training) def _example_indexed_slices_with_dense_shape(): return indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1]), constant_op.constant([2])) def _example_indexed_slices_without_dense_shape(): return indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1])) class FunctionTest(test.TestCase, parameterized.TestCase): def setUp(self): super(FunctionTest, self).setUp() cpus = config.list_physical_devices('CPU') # Set 4 virtual CPUs config.set_virtual_device_configuration(cpus[0], [ context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration() ]) def testBasic(self): matmul = def_function.function(math_ops.matmul) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) sq = matmul(t, t, transpose_a=True) sq2 = matmul(sq, t, transpose_a=True) self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20]) self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108]) def testVariable(self): v1 = variables.Variable(1.0) add = def_function.function(lambda x, v: x + v1 + v) v2 = variables.Variable(1.0) x = constant_op.constant(1.0) r = add(x, v2) self.assertEqual(3.0, self.evaluate(r)) def testVariableOnly(self): v = variables.Variable(1.0) add = def_function.function(lambda x: x.assign_add(1.0)) r1 = add(v) self.assertEqual(2.0, self.evaluate(r1)) c = constant_op.constant(1.0) with self.assertRaisesRegexp(AttributeError, 'no attribute'): add(c) def testExternalControlDependency(self): with ops.Graph().as_default(), self.test_session(): v = variables.Variable(1.0) v.initializer.run() op = v.assign_add(1.0) @function.defun def f(): with ops.control_dependencies([op]): return 1.0 self.evaluate(f()) self.assertAllEqual(self.evaluate(v), 2.0) def testInputShapeFunctionRelaxation(self): unknown_dim = [False] @function.defun(experimental_relax_shapes=True) def func(a): if a._shape_tuple()[0] is None: unknown_dim[0] = True return a + 1 func(constant_op.constant([])) self.assertFalse(unknown_dim[0]) self.assertLen(total_function_cache(func), 1) func(constant_op.constant([1.0])) self.assertFalse(unknown_dim[0]) self.assertLen(total_function_cache(func), 2) func(constant_op.constant([1.0, 2.0])) self.assertTrue(unknown_dim[0]) self.assertLen(total_function_cache(func), 2) def testCaptureNonTrainableVariable(self): v = variables.Variable(1.0, trainable=False) @def_function.function def f(): return v + 1 c = f.get_concrete_function() self.assertEqual(len(list(c.graph.variables)), 1) # pylint: disable=g-generic-assert def testNestedInputShapeFunctionRelaxation(self): unknown_dim = [False] @function.defun(experimental_relax_shapes=True) def func(a_, b_=None): del a_ # Only used to check which cache is used. 
self.assertEqual(b_[0]._shape_tuple(), ()) if b_[1]._shape_tuple()[0] is None: unknown_dim[0] = True return b_[0] + 1 a = 'hi' b0 = constant_op.constant(1.0) func(a, b_=[b0, constant_op.constant([])]) self.assertFalse(unknown_dim[0]) self.assertLen(total_function_cache(func), 1) func(a, b_=[b0, constant_op.constant([1.0])]) self.assertFalse(unknown_dim[0]) self.assertLen(total_function_cache(func), 2) func(a, b_=[b0, constant_op.constant([1.0, 1.0])]) self.assertTrue(unknown_dim[0]) self.assertLen(total_function_cache(func), 2) unknown_dim[0] = False # Now do the same except with a new a which is not a tensor; this should # change the cache key. a = 'bye' func(a, b_=[b0, constant_op.constant([])]) self.assertFalse(unknown_dim[0]) self.assertLen(total_function_cache(func), 3) # Since we already marked a cache miss for a function with the same # non-input signatures, here we will immediately start relaxing shapes. func(a, b_=[b0, constant_op.constant([1.0])]) self.assertTrue(unknown_dim[0]) self.assertLen(total_function_cache(func), 3) def testFunctionRelaxationLosesInnerDimWithKerasLayer(self): layer = keras.layers.Dense(1) fn = def_function.function(experimental_relax_shapes=True)(layer) with self.captureWritesToStream(sys.stderr) as printed: fn(array_ops.ones((3, 2))) self.assertNotIn('ValueError', printed.contents()) with self.captureWritesToStream(sys.stderr) as printed: # Use batch size 2 to trigger a second cache miss on the shape. fn(array_ops.ones((2, 2))) self.assertNotIn('ValueError', printed.contents()) # Shape relaxation passes TensorShape([None, None]), which causes layer # matmul to fail, due to incompatible dims. What would have been a graph # build time error (layer would complain about the inner dim being 4). with self.captureWritesToStream(sys.stderr) as printed: with self.assertRaisesRegexp(errors.InvalidArgumentError, r'Matrix size-incompatible'): fn(array_ops.ones((3, 4))) def testNestedShapeFunctionRelaxation(self): got_shape = [None] # The inner function will go through shape relaxation because the shapes it # receives will be [1], [2], [3], ... @def_function.function(experimental_relax_shapes=True) def bar(x_shape): got_shape[0] = x_shape._shape_tuple() return x_shape # The outer function will not go through shape relaxation because the shapes # it receives will be [1], [[1]], [[[1]]], ... 
@def_function.function(experimental_relax_shapes=True) def foo(ones): return bar(array_ops.shape(ones)) for rank in range(1, 6): x_shape = self.evaluate(foo(array_ops.ones([1] * rank))) self.assertAllEqual(x_shape, [1] * rank) if rank < 3: self.assertEqual(got_shape[0], (rank,)) else: self.assertEqual(got_shape[0], (None,)) def testNoHash(self): @def_function.function() def f(_): return 1.0 with self.assertRaisesRegexp(AttributeError, 'set'): f(set([])) def testFuncName(self): @function.defun_with_attributes(attributes={'func_name': 'multiply'}) def add(x, y): _ = x * y return x + y @function.defun def add_2(x, y): _ = x * y return x + y self.assertEqual(add._name, 'multiply') self.assertEqual(add_2._name, 'add_2') def testBasicGraphMode(self): matmul = def_function.function(math_ops.matmul) @def_function.function def sq(a): return matmul(a, a) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) out = sq(t) self.assertAllEqual(out, math_ops.matmul(t, t).numpy()) def testNestedInputsGraphMode(self): matmul = def_function.function(math_ops.matmul) pair = collections.namedtuple('pair', ['a', 'b']) @def_function.function def a_times_b(inputs): return matmul(inputs.a['a'], inputs.b['b']) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) out = a_times_b(pair({'a': t}, {'b': t})) self.assertAllEqual(out, math_ops.matmul(t, t).numpy()) def testNestedOutputsGraphMode(self): matmul = def_function.function(math_ops.matmul) pair = collections.namedtuple('pair', ['a', 'b']) @def_function.function() def pairs_mul(pair_a, pair_b): return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b)) a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]]) b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]]) out = pairs_mul(pair(a, b), pair(b, a)) expected = pair(math_ops.matmul(a, b).numpy(), math_ops.matmul(b, a).numpy()) self.assertAllClose(out, expected) def testGraphEagerIsolation(self): @function.defun def f(): self.v = variables.Variable(1.0) return self.v.read_value() self.assertAllEqual(f(), 1.0) with ops.Graph().as_default(): self.assertEqual(f().shape, ()) def testBasicGraphFunction(self): matmul = def_function.function(math_ops.matmul) @def_function.function def sq(a): return matmul(a, a) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) sq_op = sq.get_concrete_function(t) self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2])) out = sq_op(t) self.assertAllEqual(out, math_ops.matmul(t, t).numpy()) def testInputSpecGraphFunction(self): matmul = def_function.function(math_ops.matmul) @def_function.function def sq(a): return matmul(a, a) sq_op = sq.get_concrete_function( tensor_spec.TensorSpec((None, None), dtypes.float32)) self.assertEqual([None, None], sq_op.output_shapes.as_list()) t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) out1 = sq_op(t1) self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy()) t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) out2 = sq_op(t2) self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy()) def testNestedInputSpecGraphFunction(self): matmul = def_function.function(math_ops.matmul) @def_function.function def sq(mats): ((a, b),) = mats return matmul(a, b) sq_op_autonamed = sq.get_concrete_function( [(tensor_spec.TensorSpec((None, None), dtypes.float32), tensor_spec.TensorSpec((None, None), dtypes.float32))]) self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list()) sq_op = sq.get_concrete_function( [(tensor_spec.TensorSpec((None, None), dtypes.float32, name='first_mat'), tensor_spec.TensorSpec((None, None), dtypes.float32, 
name='second_mat'))]) self.assertEqual([None, None], sq_op.output_shapes.as_list()) t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]]) out = sq_op(first_mat=t1, second_mat=t2) self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy()) self.assertAllEqual(sq_op_autonamed(t1, t2), math_ops.matmul(t1, t2).numpy()) def testExecutingStatelessDefunConcurrently(self): @def_function.function def stateless(x): return math_ops.multiply(2.0, x) pool = multiprocessing.pool.ThreadPool() inputs = [constant_op.constant(1.0 * x) for x in range(100)] outputs = [float(out) for out in pool.map(stateless, inputs)] expected = [float(2.0 * x) for x in inputs] self.assertSequenceEqual(outputs, expected) def testExecutingManyStatelessDefunsConcurrently(self): @def_function.function def stateless(x): del x return math_ops.multiply(2.0, 2.0) pool = multiprocessing.pool.ThreadPool() # `pool.map` below instantiates 100 functions, one for each object. objects = [object() for _ in range(100)] outputs = [float(out) for out in pool.map(stateless, objects)] expected = [4.0] * 100 self.assertSequenceEqual(outputs, expected) def testExecutingStatefulDefunConcurrently(self): v = resource_variable_ops.ResourceVariable(1.0) @def_function.function def stateful(x): v.assign(x) pool = multiprocessing.pool.ThreadPool() inputs = [constant_op.constant(0.0)] * 100 pool.map(stateful, inputs) self.assertEqual(float(v.read_value()), 0.0) def testExecutingManyStatefulDefunsConcurrently(self): v = resource_variable_ops.ResourceVariable(1.0) @def_function.function def stateful(x): del x return v.assign(0.0) pool = multiprocessing.pool.ThreadPool() # `pool.map` below instantiates 100 functions, one for each object. pool.map(stateful, [object() for _ in range(100)]) self.assertEqual(float(v.read_value()), 0.0) def disabled_testRandomSeed(self): @def_function.function def f(): return random_ops.random_normal(()) random_seed.set_random_seed(1) x = f() self.assertNotEqual(x, f()) random_seed.set_random_seed(1) self.assertAllEqual(f(), x) def testNestedInputsGraphFunction(self): matmul = def_function.function(math_ops.matmul) pair = collections.namedtuple('pair', ['a', 'b']) @def_function.function def a_times_b(inputs): return matmul(inputs.a['a'], inputs.b['b']) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) sq_op = a_times_b.get_concrete_function( pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')), dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b')))) self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2])) out = sq_op(a=t, b=t) self.assertAllEqual(out, math_ops.matmul(t, t).numpy()) def testNestedOutputGraphFunction(self): matmul = def_function.function(math_ops.matmul) @def_function.function def sq(a): return (matmul(a, a), {'b': constant_op.constant(1.0)}) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) sq_op = sq.get_concrete_function(t) self.assertEqual(sq_op.output_shapes, (tensor_shape.TensorShape([2, 2]), {'b': tensor_shape.TensorShape([])})) self.assertEqual(sq_op.output_dtypes, (dtypes.float32, {'b': dtypes.float32})) (a, b) = sq_op(t) self.assertAllEqual(a, math_ops.matmul(t, t).numpy()) self.assertAllEqual(b['b'].numpy(), 1.0) def testGraphFunctionNoneOutput(self): @def_function.function def fn(unused_a, unused_b): return None x = constant_op.constant(1) fn_op = fn.get_concrete_function(x, x) self.assertEqual(fn_op.output_dtypes, None) self.assertEqual(fn_op.output_shapes, None) self.assertAllEqual(fn_op(x, x), None) def 
testDefunNumpyArraysConvertedToTensors(self): def f(x): self.assertIsInstance(x, ops.Tensor) return x x = random_ops.random_uniform([2, 2]).numpy() defined = function.defun(f) defined(x) self.assertLen(total_function_cache(defined), 1) x = random_ops.random_uniform([2, 2]).numpy() defined(x) # A NumPy array with different values but the same shape and dtype # shouldn't trigger another function definition. self.assertLen(total_function_cache(defined), 1) # Test that the numpy array is properly an argument to the graph function. self.assertEqual(1., defined(numpy.ones([])).numpy()) self.assertEqual(0., defined(numpy.zeros([])).numpy()) self.assertEqual(1., defined(array_ops.ones([])).numpy()) self.assertEqual(0., defined(array_ops.zeros([])).numpy()) def testDefunCapturedInt32(self): x = constant_op.constant(1, dtype=dtypes.int32) @def_function.function def add_int32s(): return x + x self.assertEqual(2, int(add_int32s())) def testDefunReadVariable(self): v = resource_variable_ops.ResourceVariable(1.0) @def_function.function def f(): return v.read_value() self.assertEqual(1.0, float(f())) def testDefunAssignAddVariable(self): v = resource_variable_ops.ResourceVariable(1.0) x = constant_op.constant(2.0) @def_function.function def test_assign_add(): v.assign_add(x) return v.read_value() self.assertEqual(3.0, float(test_assign_add())) @test_util.run_in_graph_and_eager_modes def testTensorInitializationInFunctionRaisesError(self): error_msg = ('Tensor-typed variable initializers must either be ' 'wrapped in an init_scope or callable.*') @def_function.function def tensor_init(): with self.assertRaisesRegexp(ValueError, error_msg): resource_variable_ops.ResourceVariable(constant_op.constant(2.0)) tensor_init() @test_util.run_in_graph_and_eager_modes def testCallableTensorInitializationInFunction(self): @def_function.function def tensor_init(): self.v = resource_variable_ops.ResourceVariable( lambda: constant_op.constant(2.0)) return self.v.read_value() value = tensor_init() if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.assertEqual(self.evaluate(value), 2.0) @test_util.also_run_as_tf_function def testInitScopeTensorInitializationInFunction(self): @def_function.function def tensor_init(): with ops.init_scope(): const = constant_op.constant(2.0) # Note: this variable bypasses tf.function's variable creation # requirements by bypassing variable_creator_scope by using # ResourceVariable instead of Variable. 
self.v = resource_variable_ops.ResourceVariable(const) return self.v.read_value() value = tensor_init() self.assertAllEqual(value, 2.0) @test_util.run_in_graph_and_eager_modes def testGetConcreteFunctionCreatesVariables(self): v_holder = [] @def_function.function def tensor_init(): if not v_holder: v_holder.append(variables.Variable(5.)) return v_holder[0].read_value() concrete = tensor_init.get_concrete_function() self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(5., self.evaluate(concrete())) self.assertAllEqual(5., self.evaluate(tensor_init())) def testFuncGraphCaptureByValue(self): v = variables.Variable(1.0) def trivial_function(): return v.read_value() graph_function = function.Function( trivial_function, 'test', capture_by_value=True) self.assertAllEqual(graph_function(), 1.0) v.assign(2.0) self.assertAllEqual(graph_function(), 1.0) def testFuncGraphCaptureByValueNested(self): v = variables.Variable(1.0) def trivial_function(): return control_flow_ops.cond( array_ops.placeholder_with_default(True, ()), v.read_value, v.read_value) graph_function = function.Function( trivial_function, 'test', capture_by_value=True) self.assertAllEqual(graph_function(), 1.0) v.assign(2.0) self.assertAllEqual(graph_function(), 1.0) def testDefunShapeInferenceWithCapturedResourceVariable(self): v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]]) def f(): x = constant_op.constant([[1, 2], [3, 4]]) out = math_ops.matmul(v, x) self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2])) # We do not return v directly since the tensor conversion function of # ResourceVariable returns the read value and not the resource itself. return v._handle compiled = def_function.function(f) var_handle = compiled() self.assertEqual(var_handle.dtype, dtypes.resource) self.assertEqual(var_handle.shape, tensor_shape.TensorShape([])) var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype) self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2])) def testShapeInferenceForMoreSpecificInput(self): def f(a): return array_ops.reshape(a, [-1, 3]) signature = [tensor_spec.TensorSpec(None, dtypes.float32)] compiled = def_function.function(f, input_signature=signature) @def_function.function def use_f(): inputs = array_ops.zeros([10, 10, 3]) self.assertAllEqual(f(inputs).shape, compiled(inputs).shape) use_f() def testFuncListAttr(self): @function.defun def test_function(val): def fn1(): return array_ops.ones([10]) fn2 = lambda: array_ops.ones([10]) * 2 def fn3(x=3): return array_ops.ones([10]) * x fn4 = functools.partial(fn3, x=4) fn5 = functools.partial(fn3, 5) return gen_functional_ops.case(val, [], [dtypes.float32], [function.defun(f).get_concrete_function() for f in (fn1, fn2, fn3, fn4, fn5)]) ones = array_ops.ones([10]) self.assertAllEqual([ones], test_function(0)) self.assertAllEqual([ones * 2], test_function(1)) self.assertAllEqual([ones * 3], test_function(2)) self.assertAllEqual([ones * 4], test_function(3)) self.assertAllEqual([ones * 5], test_function(4)) self.assertAllEqual([ones * 5], test_function(22)) # default branch @test_util.enable_control_flow_v2 def testVariableInLoopInFunction(self): @function.defun def test_function(): def loop_test(_): return False def loop_body(_): return variable_scope.get_variable('a', shape=()) return control_flow_ops.while_loop(loop_test, loop_body, [0.0]) self.assertEqual(test_function().shape, []) def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self): with context.graph_mode(): v = 
resource_variable_ops.ResourceVariable([[1, 2], [3, 4]]) def f(): x = constant_op.constant([[1, 2], [3, 4]]) out = math_ops.matmul(v, x) self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2])) # We do not return v directly since the tensor conversion function of # ResourceVariable returns the read value and not the resource itself. return v._handle compiled = def_function.function(f) var_handle = compiled() self.assertEqual(var_handle.dtype, dtypes.resource) self.assertEqual(var_handle.shape, tensor_shape.TensorShape([])) var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype) self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2])) def testDefunShapeInferenceWithCapturedVariableInGraphMode(self): with context.graph_mode(): v = variables.Variable([[1, 2], [3, 4]]) def f(): x = constant_op.constant([[1, 2], [3, 4]]) out = math_ops.matmul(v, x) self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2])) # Check that shape inference works while creating the defun compiled = def_function.function(f) compiled() def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self): with context.graph_mode(): tensor_list = list_ops.empty_tensor_list( element_dtype=dtypes.float32, element_shape=ops.convert_to_tensor([], dtype=dtypes.int32)) tensor_list = list_ops.tensor_list_push_back(tensor_list, constant_op.constant(1.0)) tensor_list = list_ops.tensor_list_push_back(tensor_list, constant_op.constant(2.0)) def f(): tl, value = list_ops.tensor_list_pop_back( tensor_list, element_dtype=dtypes.float32) self.assertEqual(value.shape, tensor_shape.TensorShape([])) return tl compiled = def_function.function(f) output_tensor_list = compiled() _, value = list_ops.tensor_list_pop_back( output_tensor_list, element_dtype=dtypes.float32) self.assertEqual(value.shape, tensor_shape.TensorShape([])) @test_util.run_in_graph_and_eager_modes def testDefunForcesResourceVariables(self): def variable_creator(): self.v = variables.Variable(0.0) return self.v.read_value() self.v = None defined = function.defun(variable_creator) defined() # Create the variable. 
self.assertIsInstance( self.v, resource_variable_ops.ResourceVariable) def testRunMetadata(self): @def_function.function def f(x): return x * x with ops.device('cpu:0'): context.enable_run_metadata() f(constant_op.constant(1.0)) run_metadata = context.export_run_metadata() context.disable_run_metadata() self.assertLen(run_metadata.partition_graphs, 1) def testGraphModeCaptureVariable(self): with context.graph_mode(), self.cached_session(): class HasAVar(object): def __init__(self): self.v = resource_variable_ops.ResourceVariable(1.0) def call(self): return self.v * 2 o = HasAVar() self.evaluate(variables.global_variables_initializer()) call = def_function.function(o.call) op = call() self.assertAllEqual(self.evaluate(op), 2.0) def testGraphModeManyFunctions(self): with ops.Graph().as_default(), self.cached_session(): @def_function.function def f(x): return x * x @def_function.function def g(x): return f(x) + 1 self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0) def testDict(self): @def_function.function def f(x): return {'name': x + 1} self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0) def testTensorConversionWithDefun(self): @def_function.function def f(x): return math_ops.add(x, constant_op.constant(3)) self.assertAllEqual(5, f(constant_op.constant(2))) def testTensorConversionCall(self): @def_function.function def f(x): return math_ops.add(x, constant_op.constant(3)) @def_function.function def g(x): return f(f(x)) self.assertAllEqual(8, g(constant_op.constant(2))) def testCallShape(self): @def_function.function def f(x): return x + 1 @def_function.function def g(x): x = f(x) self.assertEqual(x.shape.as_list(), []) return None g(constant_op.constant(1.0)) def testNestedDefunWithNoOutputAndTapedInput(self): three = resource_variable_ops.ResourceVariable(3.0, name='v') @def_function.function def f(x): # This function intentionally takes a taped variable as input, # but does not return any values math_ops.add(x, three) @def_function.function def g(x): y = math_ops.add(x, three) f(y) g(three) def testGatherResourceWithDefun(self): with ops.device('cpu:0'): v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0]) def sum_gather(): return math_ops.reduce_sum(array_ops.gather(v, [1, 2])) defined = def_function.function(sum_gather) self.assertAllEqual(sum_gather(), defined()) @parameterized.named_parameters([ ('IndexedSlicesWithDenseShape', _example_indexed_slices_with_dense_shape,), ('IndexedSlicesWithoutDenseShape', _example_indexed_slices_without_dense_shape,), ('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths, {'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}), ('RaggedTensorRaggedRank2', ragged_tensor.RaggedTensor.from_nested_row_lengths, {'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}), ('SparseTensor', sparse_tensor.SparseTensor, {'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}), ]) # pyformat: disable def testReturnCompositeTensorWithDefun(self, factory_fn, factory_kwargs={}, input_signature=None): input_ct = factory_fn(**factory_kwargs) @def_function.function(input_signature=input_signature) def f(): return input_ct output_ct = f() self.assertIsInstance(output_ct, type(input_ct)) nest.assert_same_structure(input_ct, output_ct, expand_composites=True) input_flat = nest.flatten(input_ct, expand_composites=True) output_flat = nest.flatten(output_ct, expand_composites=True) for (input_component, output_component) in zip(input_flat, output_flat): self.assertAllEqual(input_component, output_component) 
@parameterized.named_parameters([ ('IndexedSlicesWithDenseShape', _example_indexed_slices_with_dense_shape,), ('IndexedSlicesWithoutDenseShape', _example_indexed_slices_without_dense_shape,), ('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths, {'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}), ('RaggedTensorRaggedRank2', ragged_tensor.RaggedTensor.from_nested_row_lengths, {'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}), ('SparseTensor', sparse_tensor.SparseTensor, {'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}), ('RaggedTensorRaggedRank1WithSignature', ragged_tensor.RaggedTensor.from_row_lengths, {'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}, [ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]), ('RaggedTensorRaggedRank2WithSignature', ragged_tensor.RaggedTensor.from_nested_row_lengths, {'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}, [ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]), ('SparseTensorWithSignature', sparse_tensor.SparseTensor, {'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}, [sparse_tensor.SparseTensorSpec([None], dtypes.int32)]), ]) # pyformat: disable def testCompositeAsArgumentTensorWithDefun(self, factory_fn, factory_kwargs={}, input_signature=None): input_ct = factory_fn(**factory_kwargs) @def_function.function(input_signature=input_signature) def f(x): return x output_ct = f(input_ct) self.assertIsInstance(output_ct, type(input_ct)) nest.assert_same_structure(input_ct, output_ct, expand_composites=True) input_flat = nest.flatten(input_ct, expand_composites=True) output_flat = nest.flatten(output_ct, expand_composites=True) for (input_component, output_component) in zip(input_flat, output_flat): self.assertAllEqual(input_component, output_component) @test_util.run_gpu_only def testFunctionOnDevice(self): x = constant_op.constant([1.]).gpu() f = def_function.function(math_ops.add) y = f(x, x).cpu() self.assertAllEqual(y, [2.]) @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testFunctionWithResourcesOnDifferentDevices(self): with ops.device('/cpu:0'): v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0]) with ops.device('/gpu:0'): v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0]) def sum_gather(): cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2])) gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2])) return cpu_result, gpu_result defined = function.defun(sum_gather) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) expected = self.evaluate(sum_gather()) self.assertAllEqual(expected, self.evaluate(defined())) @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testOpInFunctionWithConflictingResourceInputs(self): with ops.device('/cpu:0'): v_cpu = resource_variable_ops.ResourceVariable( [0.0, 1.0, 2.0], name='cpu') v_also_cpu = resource_variable_ops.ResourceVariable( [0.0, 1.0, 2.0], name='also_cpu') with ops.device('/gpu:0'): v_gpu = resource_variable_ops.ResourceVariable( [0.0, 1.0, 2.0], name='gpu') @def_function.function def resource_apply_adam(): training_ops.resource_apply_adam( v_cpu.handle, v_gpu.handle, v_also_cpu.handle, 1.0, # beta1_power 1.0, # beta2_power 1.0, # learning_rate 1.0, # beta1 1.0, # beta2 1.0, # epsilon, [1.0, 1.0, 1.0], # grad False) # use_locking return None with self.assertRaisesRegexp( errors.InvalidArgumentError, 'Cannot place the graph because a reference or resource edge 
connects ' 'colocation groups with incompatible assigned devices'): if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.evaluate(resource_apply_adam()) @test_util.run_gpu_only def testFunctionHandlesInputsOnDifferentDevices(self): # The Reshape op requires the shape tensor to be placed in host memory. reshape = def_function.function(array_ops.reshape) value = constant_op.constant([1., 2.]).gpu() shape = constant_op.constant([2, 1]) reshaped = reshape(value, shape).cpu() self.assertAllEqual(reshaped, [[1], [2]]) @test_util.run_gpu_only def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self): # The Reshape op requires the shape tensor to be placed in host memory. reshape = def_function.function(array_ops.reshape) value = constant_op.constant([1., 2.]) shape = constant_op.constant([2, 1]).gpu() reshape(value, shape) # No error is raised def testNoneOutput(self): @def_function.function def my_function(_): return None self.assertAllEqual(my_function(1), None) def testNestedFunctions(self): # TensorFlow function (which is what would be used in TensorFlow graph # construction). @tf_function.Defun(dtypes.int32, dtypes.int32) def add(a, b): return math_ops.add(a, b) @def_function.function def add_one(x): return add(x, 1) self.assertAllEqual(3, add_one(constant_op.constant(2))) def testVariableCaptureInNestedFunctions(self): v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32) @def_function.function def inner_read(): return v.read_value() @def_function.function def outer(): return inner_read() self.assertEqual(1, int(outer())) def testReturnCapturedEagerTensor(self): t = constant_op.constant(1) @def_function.function def read(): return t self.assertEqual(1, int(read())) def testReturnCapturedGraphTensor(self): with context.graph_mode(), self.cached_session(): t = constant_op.constant(1) @def_function.function def read(): return t self.assertEqual(1, int(self.evaluate(read()))) def testSequenceInputs(self): clip_by_global_norm = def_function.function(clip_ops.clip_by_global_norm) t_list = [constant_op.constant(1.0), constant_op.constant(2.0)] clipped_list, global_norm = clip_by_global_norm(t_list, constant_op.constant(.2)) for t in clipped_list: self.assertIsInstance(t, ops.Tensor) self.assertIsInstance(global_norm, ops.Tensor) def testNestedSequenceInputs(self): def my_op(inputs): a, b, c = inputs e, f = b g, h = e return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c my_eager_op = def_function.function(my_op) ret = my_eager_op([ constant_op.constant(1), [(constant_op.constant(2), constant_op.constant(3)), constant_op.constant(4)], constant_op.constant(5) ]) self.assertLen(ret, 2) self.assertAllEqual(ret[0][0], 2) self.assertAllEqual(ret[0][1][0][0], 8) self.assertAllEqual(ret[0][1][0][1], 4) self.assertIsInstance(ret[0][1][0], tuple) self.assertAllEqual(ret[0][1][1], 6) self.assertAllEqual(ret[0][2], 10) self.assertAllEqual(ret[1], 15) def testVariableNamesRespectNameScopesWithDefun(self): @def_function.function def create_variable(): with ops.name_scope('foo'): v = resource_variable_ops.ResourceVariable(0.0, name='bar') self.assertEqual(v.name, 'foo/bar:0') create_variable() def testVariableNamesRespectNameScopesWithDefunInGraph(self): with context.graph_mode(): @def_function.function def create_variable(): with ops.name_scope('foo'): v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar') self.assertEqual(v.name, 'foo/bar:0') with ops.get_default_graph().as_default(): create_variable() 
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testLayerInDefun(self): conv = convolutional.Conv2D( filters=1, kernel_size=2, kernel_initializer=init_ops.ones_initializer(), bias_initializer=init_ops.zeros_initializer()) @function.defun def model(x): return conv(x) x = array_ops.ones([1, 2, 2, 1]) y = model(x) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.assertAllClose([[[[4.0]]]], self.evaluate(y)) # Variable lifting is somewhat different between defun/tf.function, so testing # device placement on both makes sense. @parameterized.named_parameters( dict(testcase_name='Defun', function_decorator=function.defun), dict(testcase_name='DefFunction', function_decorator=def_function.function)) @test_util.run_in_graph_and_eager_modes def testVariablesPlacedOnOutsideDevice(self, function_decorator): class _Obj(object): def __init__(self): self.v = None @function_decorator def f(self): if self.v is None: self.v = variables.Variable(1.) return self.v + 1. has_device = _Obj() with ops.device('cpu:0'): has_device.f() self.assertIn('CPU', has_device.v.device) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testDefunKerasModelCall(self): model = MiniModel() model.call = function.defun(model.call) x = array_ops.ones([1, 2]) y = model(x) if not context.executing_eagerly(): self.evaluate(variables.global_variables_initializer()) self.assertAllEqual([[3.0]], self.evaluate(y)) # Break the reference cycle between the MiniModel and the defun: # `MiniModel` --(through its `call` method)--> `Function` # `Function` --(instancemethod on `MiniModel`)--> `MiniModel` del model.call @test_util.run_in_graph_and_eager_modes def testDeviceAnnotationsRespected(self): def multi_device_fn(): with ops.device('/cpu:0'): s0 = test_ops.device_placement_op() with ops.device('/cpu:1'): s1 = test_ops.device_placement_op() with ops.device('/cpu:2'): s2 = test_ops.device_placement_op() s3 = test_ops.device_placement_op() return s0, s1, s2, s3 defined = function.defun(multi_device_fn) outputs = self.evaluate(defined()) self.assertLen(total_function_cache(defined), 1) self.assertIn(compat.as_bytes('CPU:0'), outputs[0]) self.assertIn(compat.as_bytes('CPU:1'), outputs[1]) self.assertIn(compat.as_bytes('CPU:2'), outputs[2]) with ops.device('/cpu:3'): outputs = self.evaluate(defined()) # All function definitions are agnostic to call site devices. 
self.assertLen(total_function_cache(defined), 1) self.assertIn(compat.as_bytes('CPU:0'), outputs[0]) self.assertIn(compat.as_bytes('CPU:1'), outputs[1]) self.assertIn(compat.as_bytes('CPU:2'), outputs[2]) self.assertIn(compat.as_bytes('CPU:3'), outputs[3]) with ops.device('/cpu:0'): outputs = self.evaluate(defined()) self.assertLen(total_function_cache(defined), 1) self.assertIn(compat.as_bytes('CPU:0'), outputs[0]) self.assertIn(compat.as_bytes('CPU:1'), outputs[1]) self.assertIn(compat.as_bytes('CPU:2'), outputs[2]) self.assertIn(compat.as_bytes('CPU:0'), outputs[3]) @test_util.run_in_graph_and_eager_modes def testCallingGraphFunctionOnDifferentDevice(self): def func(): return constant_op.constant(0) defined = def_function.function(func) with ops.device('cpu:0'): cpu_graph_function = defined.get_concrete_function() with ops.device('cpu:0'): self.assertEqual( self.evaluate(cpu_graph_function()), self.evaluate(func())) with ops.device('cpu:1'): self.assertEqual(0., self.evaluate(cpu_graph_function())) with ops.device(None): self.assertEqual(0., self.evaluate(cpu_graph_function())) default_graph_function = defined.get_concrete_function() self.assertEqual( self.evaluate(default_graph_function()), self.evaluate(func())) with ops.device('cpu:1'): self.assertEqual(0., self.evaluate(default_graph_function())) @test_util.run_gpu_only @test_util.run_in_graph_and_eager_modes def testColocateWithRespected(self): # TODO(b/113291792): Use multiple CPUs instead of a GPU. with ops.device('cpu:0'): x = constant_op.constant(1.0) with ops.device('gpu:0'): y = constant_op.constant(1.0) @def_function.function def foo(): return test_ops.device_placement_op() with ops.colocate_with(x): self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo())) with ops.colocate_with(y): self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo())) def testVariablesAreTracked(self): v = resource_variable_ops.ResourceVariable(1.0) def foo(x): return v * x defined = def_function.function(foo) x = constant_op.constant([1.0]) self.assertEqual(1., self.evaluate(defined(x))) v.assign(2.) 
x = constant_op.constant([1.0, 2.0]) self.assertAllEqual([2., 4.], self.evaluate(defined(x))) def testCacheObjectHashCollisions(self): class Foo(object): def __hash__(self): return 42 def func(foo): del foo return defined = function.defun(func) defined(Foo()) self.assertLen(total_function_cache(defined), 1) defined(Foo()) self.assertLen(total_function_cache(defined), 2) def testCacheTensorDtypeCollision(self): def func(t): return t + t defined = function.defun(func) t = constant_op.constant([[1.0]], dtype=dtypes.complex64) defined(t) self.assertLen(total_function_cache(defined), 1) t = constant_op.constant([[1.0]], dtype=dtypes.complex128) defined(t) self.assertLen(total_function_cache(defined), 2) def testCacheTensorShapeCollision(self): def func(t): return t + t defined = function.defun(func) t = constant_op.constant([[1.0]], dtype=dtypes.complex64) defined(t) self.assertLen(total_function_cache(defined), 1) t = constant_op.constant([1.0], dtype=dtypes.complex64) defined(t) self.assertLen(total_function_cache(defined), 2) def testCacheTensorShapeDtypeCollision(self): def func(t): return t + t defined = function.defun(func) t = constant_op.constant([[1.0]], dtype=dtypes.complex64) defined(t) self.assertLen(total_function_cache(defined), 1) t = constant_op.constant([1.0], dtype=dtypes.complex128) defined(t) self.assertLen(total_function_cache(defined), 2) def testCacheTensorUnknownShapesCollisionRelaxedShapes(self): def func(t): return t + t with context.graph_mode(), self.cached_session(): defined = function.defun(func, experimental_relax_shapes=True) p = array_ops.placeholder(dtype=dtypes.float32, shape=[]) defined(p) self.assertLen(total_function_cache(defined), 1) p = array_ops.placeholder(dtype=dtypes.float32, shape=[1]) defined(p) self.assertLen(total_function_cache(defined), 2) p = array_ops.placeholder(dtype=dtypes.float32, shape=[2]) defined(p) # Gradual shape relaxation is performed; and the common shape between # [1] and [2] is one containing unknown dimensions. self.assertLen(total_function_cache(defined), 2) # pylint: disable=protected-access self.assertLen(defined._function_cache.arg_relaxed_shapes, 1) relaxed_shapes = ( list(defined._function_cache.arg_relaxed_shapes.values())[0]) self.assertLen(relaxed_shapes, 1) relaxed_shape = relaxed_shapes[0] # pylint: enable=protected-access self.assertEqual(relaxed_shape.rank, 1) self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None) t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32) defined(t) # Shape (3,) matches the relaxed shape TensorShape([None]) self.assertLen(total_function_cache(defined), 2) def testPythonFunctionWithDefaultArgs(self): def func(foo, bar=1, baz=2): del foo del bar del baz return defined = function.defun(func) defined(0, baz=20) def cache_keys(): """Sanitizes cache keys of non-input metadata.""" return tuple(key[0] for key in total_function_cache(defined)) # `True` corresponds to the fact that we're executing eagerly self.assertIn(('URRRu', (0, 1, 20)), cache_keys()) defined(1) # bar=1, baz=2 self.assertIn(('URRRu', (1, 1, 2)), cache_keys()) # This matches the previous call. defined(foo=1) self.assertLen(total_function_cache(defined), 2) defined(1, 2, 3) self.assertLen(total_function_cache(defined), 3) self.assertIn(('URRRu', (1, 2, 3)), cache_keys()) # This matches the previous call. defined(1, bar=2, baz=3) self.assertLen(total_function_cache(defined), 3) # This matches the previous call. 
defined(1, baz=3, bar=2) self.assertLen(total_function_cache(defined), 3) def testFunctoolsPartialUnwrappedCorrectly(self): def full_function(a, b, c=3): return a, b, c partial = functools.partial(full_function, 1, c=4) a, b, c = partial(2) defined = function.defun(partial) func_a, func_b, func_c = defined(2) self.assertEqual(func_a.numpy(), a) self.assertEqual(func_b.numpy(), b) self.assertEqual(func_c.numpy(), c) def testInputSignatureWithMatchingInputs(self): def foo(a): self.assertEqual(a.shape, (2,)) return a signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)] defined = function.defun(foo, input_signature=signature) a = array_ops.ones([2]) self.assertAllEqual(a, defined(a)) self.assertLen(total_function_cache(defined), 1) self.assertAllEqual(a, defined.get_concrete_function()(a)) self.assertAllEqual(a, defined.get_concrete_function(a)(a)) self.assertAllEqual(a, defined.get_concrete_function( tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a)) self.assertLen(total_function_cache(defined), 1) def bar(a): self.assertEqual(a._shape_tuple(), (2, None)) return a signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)] defined = function.defun(bar, input_signature=signature) a = array_ops.ones([2, 1]) out = defined(a) self.assertLen(total_function_cache(defined), 1) self.assertAllEqual(out, a) # Changing the second dimension shouldn't create a new function. b = array_ops.ones([2, 3]) out = defined(b) self.assertLen(total_function_cache(defined), 1) self.assertAllEqual(out, b) def testInputSignatureWithCompatibleInputs(self): rank2_spec = tensor_spec.TensorSpec(shape=(None, None), dtype=dtypes.float32) @function.defun(input_signature=[rank2_spec]) def func(a): self.assertEqual([None, None], a.shape.as_list()) return array_ops.shape(a) self.assertAllEqual([3, 1], func([[0], [1.0], [1]])) self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]]))) with self.assertRaisesRegexp(ValueError, 'incompatible'): func([0.0, 1.0, 2.0]) # Wrong shape. with self.assertRaisesRegexp(ValueError, 'incompatible'): func([['wrong dtype']]) def testNestedInputSignatures(self): def expected_foo(a, b): return [a, b] @function.defun(input_signature=[ [tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2, tensor_spec.TensorSpec((1,), dtypes.float32), ]) def foo(a, b): self.assertEqual(a[0]._shape_tuple(), (2, None)) self.assertEqual(a[1]._shape_tuple(), (2, None)) self.assertEqual(b._shape_tuple(), (1,)) return [a, b] a = array_ops.ones([2, 1]) b = array_ops.ones([1]) expected = expected_foo([a, a], b) out = foo([a, a], b) self.assertLen(total_function_cache(foo), 1) nest.assert_same_structure(out, expected) self.assertAllEqual(out[0][0], a) self.assertAllEqual(out[0][1], a) self.assertAllEqual(out[1], b) # Changing the unspecified dimensions shouldn't create a new function. a = array_ops.ones([2, 3]) b = array_ops.ones([2, 5]) c = array_ops.ones([1]) expected = expected_foo([a, b], c) out = foo([a, b], c) self.assertLen(total_function_cache(foo), 1) nest.assert_same_structure(out, expected) self.assertAllEqual(out[0][0], a) self.assertAllEqual(out[0][1], b) self.assertAllEqual(out[1], c) # Passing compatible inputs should work. 
a = a.numpy().tolist() b = b.numpy().tolist() c = c.numpy().tolist() out = foo([a, b], c) self.assertLen(total_function_cache(foo), 1) nest.assert_same_structure(out, expected) self.assertAllEqual(out[0][0], a) self.assertAllEqual(out[0][1], b) self.assertAllEqual(out[1], c) def testNestedInputSignaturesWithDict(self): def expected_bar(a): return a @function.defun(input_signature=[{ 'a': tensor_spec.TensorSpec((2, None), dtypes.float32), 'b': tensor_spec.TensorSpec((2, None), dtypes.float32), 'c': tensor_spec.TensorSpec((1,), dtypes.float32)}]) def bar(a): self.assertEqual(a['a']._shape_tuple(), (2, None)) self.assertEqual(a['b']._shape_tuple(), (2, None)) self.assertEqual(a['c']._shape_tuple(), (1,)) return a a = array_ops.ones([2, 3]) b = array_ops.ones([1]) inputs = {'a': a, 'b': a, 'c': b} expected = expected_bar(inputs) out = bar(inputs) nest.assert_same_structure(out, expected) self.assertAllEqual(out['a'], expected['a']) self.assertAllEqual(out['b'], expected['b']) self.assertAllEqual(out['c'], expected['c']) # Passing compatible inputs should work. a = a.numpy().tolist() b = b.numpy().tolist() inputs = {'a': a, 'b': a, 'c': b} out = bar(inputs) nest.assert_same_structure(out, expected) self.assertAllEqual(out['a'], expected['a']) self.assertAllEqual(out['b'], expected['b']) self.assertAllEqual(out['c'], expected['c']) def testInputSignatureMustBeSequenceOfTensorSpecs(self): def foo(a, b): del a del b # Signatures must consist exclusively of `TensorSpec` objects. signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)] with self.assertRaisesRegexp(TypeError, 'Invalid input_signature.*'): def_function.function(foo, input_signature=signature) # Signatures must be either lists or tuples on their outermost levels. signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)} with self.assertRaisesRegexp(TypeError, 'input_signature must be either a ' 'tuple or a list.*'): function.defun(foo, input_signature=signature) @test_util.run_in_graph_and_eager_modes def testInputsIncompatibleWithSignatureRaisesError(self): def foo(a): return a signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)] defined = def_function.function(foo, input_signature=signature) # Invalid shapes. with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'): defined(array_ops.ones([3])) with self.assertRaisesRegexp(ValueError, 'Python inputs incompatible.*'): defined(array_ops.ones([2, 1])) # Wrong number of arguments. 
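    # The signature fixes the function's arity: surplus arguments, missing
    # arguments, and incompatible concrete-function specs are all rejected
    # before any tracing happens.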
with self.assertRaisesRegexp(TypeError, r'Received 2 argument\(s\)'): defined(array_ops.ones([2]), array_ops.ones([2])) with self.assertRaisesRegexp(ValueError, 'Structure of Python function inputs.*'): defined() with self.assertRaisesRegexp(ValueError, 'inputs incompatible with input_signature'): defined.get_concrete_function( tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32)) def testInputsIncompatibleWithNestedSignatureRaisesError(self): def foo(a, b): return [a, b] signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2, [tensor_spec.TensorSpec((1,), dtypes.float32)] * 2] defined = function.defun(foo, input_signature=signature) a = array_ops.ones([1]) with self.assertRaisesRegexp(ValueError, 'Structure of Python function inputs.*'): defined([a, a, a], [a]) with self.assertRaisesRegexp(ValueError, 'Structure of Python function inputs.*'): defined([a], [a, a, a]) defined([a, a], [a, a]) def testUnderspecifiedInputSignature(self): @function.defun(input_signature=[ tensor_spec.TensorSpec([], dtypes.float32), ]) def foo(a, training=True): if training: return a else: return -1.0 * a x = constant_op.constant(1.0) with self.assertRaisesRegexp(TypeError, 'only pass arguments'): foo(x, training=True) with self.assertRaisesRegexp(TypeError, 'only pass arguments'): foo(x, training=False) self.assertAllEqual(x.numpy(), foo(x).numpy()) def testInputSignatureWithPartialFunction(self): def full_function(a, b, c=3.0): return a, b, c partial = functools.partial(full_function, 1, c=4) a, b, c = partial(2.0) signature = [tensor_spec.TensorSpec([], dtypes.float32)] defined = function.defun(partial, input_signature=signature) x = constant_op.constant(2.0) func_a, func_b, func_c = defined(x) self.assertEqual(func_a.numpy(), a) self.assertEqual(func_b.numpy(), b) self.assertEqual(func_c.numpy(), c) def testInputSignatureConversionWithDefaultArg(self): def foo(a, training=True): if training: return a else: return -1.0 * a signature = [ tensor_spec.TensorSpec([], dtypes.float32), tensor_spec.TensorSpec([], dtypes.bool), ] defined = def_function.function(foo, input_signature=signature) a = constant_op.constant(1.0) self.assertAllEqual(a.numpy(), defined(a)) self.assertAllEqual(a.numpy(), defined(a, training=True)) self.assertAllEqual(-a.numpy(), defined(a, training=False)) def testInputSignatureWithKeywordPositionalArgs(self): @function.defun(input_signature=[ tensor_spec.TensorSpec([], dtypes.float32), tensor_spec.TensorSpec([], dtypes.int64) ]) def foo(flt, integer): return flt, integer flt = constant_op.constant(1.0) integer = constant_op.constant(2, dtypes.int64) out1, out2 = foo(flt, integer) self.assertLen(total_function_cache(foo), 1) self.assertEqual(out1.numpy(), 1.0) self.assertEqual(out2.numpy(), 2) out1, out2 = foo(flt=flt, integer=integer) self.assertLen(total_function_cache(foo), 1) self.assertEqual(out1.numpy(), 1.0) self.assertEqual(out2.numpy(), 2) out1, out2 = foo(integer=integer, flt=flt) self.assertLen(total_function_cache(foo), 1) self.assertEqual(out1.numpy(), 1.0) self.assertEqual(out2.numpy(), 2) out1, out2 = foo(flt, integer=integer) self.assertLen(total_function_cache(foo), 1) self.assertEqual(out1.numpy(), 1.0) self.assertEqual(out2.numpy(), 2) def testInputSignatureWithKeywordArgs(self): def foo(a, b, **kwargs): del kwargs return a, b x = function.defun( foo, input_signature=[ tensor_spec.TensorSpec([], dtypes.float32), tensor_spec.TensorSpec([], dtypes.int32) ]).get_concrete_function() result = x(constant_op.constant(5.0), constant_op.constant(5)) 
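    # The concrete function binds exactly the two tensors described by the
    # signature; the unused **kwargs contributes nothing to the traced
    # signature.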
self.assertAllEqual(result, [5.0, 5]) def testInputSignatureWithCompositeTensors(self): def f(rt): self.assertEqual(rt.values.shape.as_list(), [None]) self.assertEqual(rt.row_splits.shape.as_list(), [4]) return rt signature = [ragged_tensor.RaggedTensorSpec( shape=[3, None], dtype=dtypes.int32)] defined = function.defun(f, input_signature=signature) rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]]) out1 = defined(rt1) self.assertLen(total_function_cache(defined), 1) self.assertAllEqual(out1.values, rt1.values) self.assertAllEqual(out1.row_splits, rt1.row_splits) # Changing the row lengths shouldn't create a new function. rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]]) out2 = defined(rt2) self.assertLen(total_function_cache(defined), 1) self.assertAllEqual(out2.values, rt2.values) self.assertAllEqual(out2.row_splits, rt2.row_splits) # Different number of rows rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]]) with self.assertRaisesRegexp(ValueError, 'incompatible'): defined(rt3) # Different dtype rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]]) with self.assertRaisesRegexp(ValueError, 'Structure .* does not match'): defined(rt4) # Different rank rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]]) with self.assertRaisesRegexp(ValueError, 'does not match'): defined(rt5) def testTensorKeywordArguments(self): def foo(a, b): del a return b defined = function.defun(foo) a = constant_op.constant(2.0) b = constant_op.constant([1.0, 2.0]) one = defined(a, b) self.assertLen(total_function_cache(defined), 1) two = defined(a=a, b=b) self.assertLen(total_function_cache(defined), 1) three = defined(b=b, a=a) self.assertLen(total_function_cache(defined), 1) four = defined(a, b=b) self.assertLen(total_function_cache(defined), 1) # The next call corresponds to a new input signature, hence # we expect another function to be defined. five = defined(b, a) self.assertLen(total_function_cache(defined), 2) six = defined(a=b, b=a) self.assertLen(total_function_cache(defined), 2) seven = defined(b=a, a=b) self.assertLen(total_function_cache(defined), 2) self.assertAllEqual(one, [1.0, 2.0]) self.assertAllEqual(two, [1.0, 2.0]) self.assertAllEqual(three, [1.0, 2.0]) self.assertAllEqual(four, [1.0, 2.0]) self.assertAllEqual(five, 2.0) self.assertAllEqual(six, 2.0) self.assertAllEqual(seven, 2.0) def testDefuningInstanceMethod(self): integer = constant_op.constant(2, dtypes.int64) class Foo(object): def one(self, tensor): return tensor @def_function.function def two(self, tensor, other=integer): return self.one(tensor), other foo = Foo() t = constant_op.constant(1.0) one, two = foo.two(t) self.assertEqual(one.numpy(), 1.0) self.assertEqual(two.numpy(), 2) def testDefuningInstanceMethodWithDefaultArgument(self): integer = constant_op.constant(2, dtypes.int64) class Foo(object): @def_function.function def func(self, other=integer): return other foo = Foo() self.assertEqual(foo.func().numpy(), int(integer)) def testPythonCallWithSideEffects(self): state = [] @def_function.function def side_effecting_function(): state.append(0) side_effecting_function() self.assertAllEqual(state, [0]) # The second invocation should call the graph function, which shouldn't # trigger the list append. side_effecting_function() self.assertAllEqual(state, [0]) # Whereas calling the python function directly should create a side-effect. 
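    # `python_function` exposes the original undecorated Python callable, so
    # the append below runs eagerly and the side effect is observed again.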
    side_effecting_function.python_function()
    self.assertAllEqual(state, [0, 0])

  def testFunctionWithNestedFunctionCallAndSideEffects(self):
    v1 = variables.Variable(1.0)
    v2 = variables.Variable(1.0)

    @def_function.function
    def add_one(a):
      a.assign_add(1.0)

    # Grappler will inline calls to `add_one` into the function body; we check
    # that all side effects were executed.
    @def_function.function
    def side_effecting_function(a, b):
      add_one(a)
      add_one(b)
      return a + b

    result = side_effecting_function(v1, v2)
    self.assertEqual(result.numpy(), 4.0)

  def testFunctionWithExtraAttributes(self):
    @function.defun_with_attributes(attributes={'experimental_1': 'value1',
                                                'experimental_2': 2})
    def matmul(x, y):
      return math_ops.matmul(x, y)

    def add(x, y):
      return math_ops.add(x, y)

    defun_add = function.defun_with_attributes(
        add, attributes={'experimental_3': True, 'experimental_4': 1.0})

    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        sq = matmul(t, t)
        double = defun_add(t, t)
        self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
        self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])

        graph = ops.get_default_graph()
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 2)
        functions = list(graph._functions.values())
        self.assertRegexpMatches(
            functions[0].definition.signature.name, '.*matmul.*')
        attrs = functions[0].definition.attr
        self.assertLen(attrs, 2)
        self.assertEqual(attrs['experimental_1'].s, b'value1')
        self.assertEqual(attrs['experimental_2'].i, 2)
        self.assertRegexpMatches(
            functions[1].definition.signature.name, '.*add.*')
        attrs = functions[1].definition.attr
        self.assertLen(attrs, 2)
        self.assertEqual(attrs['experimental_3'].b, True)
        self.assertEqual(attrs['experimental_4'].f, 1.0)
        # pylint: enable=protected-access

  def testFunctionWithInvalidAttribute(self):
    @function.defun_with_attributes(attributes={'experimental_1': ['value1']})
    def add(x, y):
      return math_ops.add(x, y)

    with self.assertRaisesRegexp(ValueError,
                                 '.*Unsupported attribute type.*'):
      with context.graph_mode(), self.cached_session():
        with ops.get_default_graph().as_default():
          t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
          add(t, t)

  def testRegisterFunction(self):

    @function.defun
    def add(x, y):
      return math_ops.add(x, y)

    def matmul(x, y):
      return math_ops.matmul(x, y)

    defun_matmul = function.defun(matmul)

    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        function.register(defun_matmul, t, t)
        function.register(add, t, t)

        graph = ops.get_default_graph()
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 6)
        # Two sets of functions, each consisting of an (inference, forward,
        # backward) triple.
        functions = list(graph._functions.values())
        captured_function_names = [
            f.definition.signature.name for f in functions
        ]
        expected_func_name_regex = [
            '.*inference.*matmul.*',
            '.*forward.*matmul.*',
            '.*inference.*backward.*matmul.*',
            '.*inference.*add.*',
            '.*forward.*add.*',
            '.*inference.*backward.*add.*',
        ]
        for i in range(len(functions)):
          self.assertRegexpMatches(captured_function_names[i],
                                   expected_func_name_regex[i])

        # Check that the forward and backward functions have the correct
        # attributes.
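        # The forward and backward definitions cross-reference each other by
        # name, which is how the runtime pairs them during gradient
        # computation.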
        self.assertEqual(
            functions[1].definition.attr['backward_function_name'].s,
            functions[2].name)
        self.assertEqual(
            functions[2].definition.attr['forward_function_name'].s,
            functions[1].name)
        self.assertEqual(
            functions[4].definition.attr['backward_function_name'].s,
            functions[5].name)
        self.assertEqual(
            functions[5].definition.attr['forward_function_name'].s,
            functions[4].name)

        sq = defun_matmul(t, t)
        double = add(t, t)
        self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
        self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
        # Make sure the pre-registered function is used, and no other function
        # is added.
        self.assertLen(graph._functions, 6)
        functions = list(graph._functions.values())
        for i in range(len(functions)):
          self.assertEqual(captured_function_names[i],
                           functions[i].definition.signature.name)

  @parameterized.named_parameters(
      dict(testcase_name='Defun', function_decorator=function.defun),
      dict(testcase_name='DefFunction',
           function_decorator=def_function.function))
  def testRegisterConcreteFunction(self, function_decorator):
    @function_decorator
    def py_add(x, y):
      return math_ops.add(x, y)

    py_add(array_ops.ones([]), array_ops.ones([]))
    add = py_add.get_concrete_function(
        tensor_spec.TensorSpec(None, dtypes.float32),
        tensor_spec.TensorSpec(None, dtypes.float32))

    @function_decorator
    def py_composite(x, y):
      return x, add(x, y)

    py_composite(array_ops.ones([]), array_ops.ones([]))
    composite = py_composite.get_concrete_function(
        tensor_spec.TensorSpec(None, dtypes.float32),
        tensor_spec.TensorSpec(None, dtypes.float32))

    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        composite.add_to_graph()
        composite.add_gradient_functions_to_graph()

        graph = ops.get_default_graph()
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 6)
        # Two sets of functions, each consisting of an (inference, forward,
        # backward) triple.
        functions = list(graph._functions.values())
        captured_function_names = [
            f.definition.signature.name for f in functions
        ]
        expected_func_name_regex = [
            '.*inference.*py_composite.*',
            '.*inference.*py_add.*',
            '.*forward.*py_composite.*',
            '.*forward.*py_add.*',
            '.*inference.*backward.*py_composite.*',
            '.*inference.*backward.*py_add.*',
        ]
        for expected, found in zip(
            expected_func_name_regex, captured_function_names):
          self.assertRegexpMatches(found, expected)

        composite_t, composite_double = composite(t, t)
        double = add(t, t)
        self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
        self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
        self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
        # Make sure the pre-registered function is used, and no other function
        # is added.
        self.assertLen(graph._functions, 6)

  @parameterized.named_parameters(
      dict(testcase_name='Defun', function_decorator=function.defun),
      dict(testcase_name='DefFunction',
           function_decorator=def_function.function))
  def testEagerCaptures(self, function_decorator):
    with context.eager_mode():
      large_tensor = array_ops.ones(shape=(256,))
      self.assertGreater(256, func_graph._EAGER_CONST_THRESHOLD)

      small_tensor = array_ops.ones(shape=(4,))
      self.assertLessEqual(4, func_graph._EAGER_CONST_THRESHOLD)

      v = resource_variable_ops.ResourceVariable(0.0)

    for captured, op_type in [(large_tensor, 'Placeholder'),
                              (small_tensor, 'Const'), (v, 'Placeholder')]:

      @function_decorator
      def test_fn():
        return captured + 1  # pylint: disable=cell-var-from-loop

      g = test_fn.get_concrete_function().graph
      internal_captures = g.internal_captures
      self.assertLen(internal_captures, 1)
      self.assertEqual(internal_captures[0].op.type, op_type)

  def testRegisterFunctionWithInputSignature(self):
    def matmul(x, y):
      return math_ops.matmul(x, y)

    defun_matmul = function.defun(
        matmul,
        input_signature=[
            tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
            tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
        ])
    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        function.register(defun_matmul, t, t)

        graph = ops.get_default_graph()
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 3)

        # Test register function with cache; note that the inputs are ignored.
        function.register(defun_matmul)
        graph = ops.get_default_graph()
        self.assertLen(graph._functions, 3)

  def testRegisterFunctionWithCache(self):
    def matmul(x, y):
      return math_ops.matmul(x, y)

    defun_matmul = function.defun(matmul)

    with context.graph_mode(), self.cached_session():
      with ops.get_default_graph().as_default():
        t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
        t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
        function.register(defun_matmul, t, t)
        function.register(defun_matmul, t2, t2)

        graph = ops.get_default_graph()
        # Only one function is registered since the input parameters have the
        # same type.
        # pylint: disable=protected-access
        self.assertLen(graph._functions, 3)

  def testCallingFunctionWithDifferentVariables(self):

    @function.defun
    def foo(v):
      v.assign_add(1.0)
      return v.read_value()

    v = resource_variable_ops.ResourceVariable(0.0)
    graph_function = foo.get_concrete_function(v)
    self.assertLen(graph_function.inputs, 1)
    self.assertEmpty(graph_function.captured_inputs)

    self.assertEqual(float(graph_function(v)), 1.0)
    self.assertEqual(float(graph_function(v)), 2.0)

    w = resource_variable_ops.ResourceVariable(0.0)

    @function.defun
    def bar(v):
      del v
      return constant_op.constant(1.0)

    graph_function = bar.get_concrete_function(v)
    self.assertEqual(float(graph_function(v)), 1.0)
    self.assertEqual(float(graph_function(w)), 1.0)

  def testCallingFunctionWithNonTensorsFails(self):

    @function.defun
    def foo(x):
      return x

    graph_function = foo.get_concrete_function(constant_op.constant(1.0))
    with self.assertRaisesRegexp(
        ValueError, 'All inputs to `ConcreteFunction`s must be Tensors;.*'):
      graph_function('Not a Tensor.')

  def testSwapImplementationWithGrapplerPlugin(self):
    # Set min_graph_nodes to -1 since the graph in this test is too small and
    # will be ignored by grappler if we don't set this.
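    # The implementation_selector rewrite lets grappler swap between function
    # variants that share an 'api_implements' attribute, preferring the one
    # whose 'api_preferred_device' matches the available hardware.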
    rewrites = rewriter_config_pb2.RewriterConfig()
    rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
    rewrites.min_graph_nodes = -1
    graph_options = config_pb2.GraphOptions(
        rewrite_options=rewrites, build_cost_model=1)
    config_proto = config_pb2.ConfigProto(graph_options=graph_options)

    with context.graph_mode(), self.cached_session(
        config=config_proto, graph=ops.Graph(), use_gpu=True):

      @function.defun_with_attributes(
          attributes={
              'api_implements': 'random_boost',
              'api_preferred_device': 'CPU'
          })
      def cpu_boost(x):
        return math_ops.add(x, 2.0)

      @function.defun_with_attributes(
          attributes={
              'api_implements': 'random_boost',
              'api_preferred_device': 'GPU'
          })
      def gpu_boost(x):
        return math_ops.add(x, 4.0)

      x = constant_op.constant(1.0)

      function.register(cpu_boost, x)
      y = gpu_boost(x)
      y_value = self.evaluate(y)

      if test.is_gpu_available():
        self.assertEqual(y_value, 5.0)
      else:
        # Grappler falls back to the CPU implementation even when the GPU
        # function was called.
        self.assertEqual(y_value, 3.0)

  def testSwapImplementationInEager(self):
    if not context.executing_eagerly():
      self.skipTest('eager only')

    context.context().set_optimizer_experimental_options(
        {'min_graph_nodes': -1, 'implementation_selector': True})

    @function.defun_with_attributes(
        attributes={'api_implements': 'foo', 'api_preferred_device': 'CPU'})
    def on_cpu(x):
      return x + 2

    @function.defun_with_attributes(
        attributes={'api_implements': 'foo', 'api_preferred_device': 'GPU'})
    def on_gpu(x):
      return x + 4

    @function.defun
    def run_on_cpu(t):
      function.register(on_cpu, t)
      with ops.device('CPU:0'):
        return on_gpu(t)

    # Expect the on_cpu branch to run, regardless of whether a GPU is
    # available.
    self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)

  def testDefunFunctionSeparateGraphs(self):
    with context.graph_mode():

      @function.defun
      def add(x):
        return x + 5

      @function.defun
      def maybe_add(x, should_add):
        if should_add:
          return add(x)
        else:
          return x

      with ops.Graph().as_default():
        x = constant_op.constant(11)
        maybe_add(x, True)
        self.assertLen(total_function_cache(maybe_add), 1)
        self.assertLen(total_function_cache(add), 1)

        maybe_add(x, False)
        self.assertLen(total_function_cache(maybe_add), 2)
        self.assertLen(total_function_cache(add), 1)

      with ops.Graph().as_default():
        x = constant_op.constant(11)
        maybe_add(x, True)
        self.assertLen(total_function_cache(maybe_add), 3)
        self.assertLen(total_function_cache(add), 2)

  def testCacheKeyOverlappingShapes(self):

    @function.defun
    def defined(t):
      return t

    defined(array_ops.zeros([12, 1]))
    self.assertLen(total_function_cache(defined), 1)

    defined(array_ops.zeros([1, 21]))
    self.assertLen(total_function_cache(defined), 2)

  def testCacheKeyNestedLists(self):

    @function.defun
    def defined(l):
      return l

    a = constant_op.constant(1.)
    b = constant_op.constant(2.)
    c = constant_op.constant(3.)
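    # The nesting structure is part of the cache key: [[a], b, c] and
    # [[a, b], c] flatten to the same three tensors but trace separately.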
defined([[a], b, c]) self.assertLen(total_function_cache(defined), 1) defined([[a, b], c]) self.assertLen(total_function_cache(defined), 2) def testDecoratedMethod(self): m = DefunnedMiniModel() instance_call_one = m.call(array_ops.ones([1, 2]), training=True) instance_call_two = m.call( inputs=array_ops.ones([1, 2]), training=True) class_call = DefunnedMiniModel.call(m, array_ops.ones([1, 2]), training=True) self.assertAllEqual(instance_call_one, instance_call_two) self.assertAllEqual(instance_call_one, class_call) def testDecoratedMethodUniqueFunctionPerInstance(self): m = DefunnedMiniModel() n = DefunnedMiniModel() class_method_one = DefunnedMiniModel.call class_method_two = DefunnedMiniModel.call m_method_one = m.call m_method_two = m.call n_method_one = n.call n_method_two = n.call self.assertEqual(class_method_one, class_method_two) self.assertEqual(m_method_one, m_method_two) self.assertEqual(n_method_one, n_method_two) self.assertNotEqual(m.call, n.call) def testDecoratedMethodInspect(self): m = DefunnedMiniModel() fullargspec = tf_inspect.getfullargspec(m.call) self.assertIn('training', fullargspec.args) def testDecoratedMethodGetConcreteFunction(self): m = DefunnedMiniModel() instance_call_one = m.call.get_concrete_function( array_ops.ones([1, 2]), training=False) instance_call_two = m.call.get_concrete_function( inputs=array_ops.ones([1, 2]), training=False) self.assertAllEqual(instance_call_one(array_ops.ones([1, 2])), instance_call_two(array_ops.ones([1, 2]))) # Also make sure get_concrete_function works on the class method DefunnedMiniModel.call.get_concrete_function( m, array_ops.ones([1, 2]), training=False) DefunnedMiniModel.call.get_concrete_function( m, inputs=array_ops.ones([1, 2]), training=True) def testFunctionModifiesInputList(self): # Tests on `list` methods that do in place modification, except `list.sort` # since it cannot even be "defunned" in the first place def get_list(): return [constant_op.constant(0.), constant_op.constant(1.)] expected_msg = ( 'Function to be traced should not modify structure of input ' 'arguments. 
Check if your function has list and dictionary ' 'operations that alter input arguments, ' 'such as `list.pop`, `list.append`') with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def append(l): l.append(constant_op.constant(0.)) append(get_list()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def extend(l): l.extend([constant_op.constant(0.)]) extend(get_list()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def insert(l): l.insert(0, constant_op.constant(0.)) insert(get_list()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def pop(l): l.pop() pop(get_list()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def reverse(l): l.reverse() reverse(get_list()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def remove(l): l.remove(l[0]) remove(get_list()) # `list.clear` is a method that is in Py3 but not Py2 if sys.version.startswith('3'): with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def clear(l): l.clear() clear(get_list()) # One last test for keyword arguments with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def kwdappend(**kwargs): l = kwargs['l'] l.append(constant_op.constant(0.)) kwdappend(l=get_list()) def testFunctionModifiesInputDict(self): def get_dict(): return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)} expected_msg = ( 'Function to be traced should not modify structure of input ' 'arguments. Check if your function has list and dictionary ' 'operations that alter input arguments, ' 'such as `list.pop`, `list.append`') with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def clear(m): m.clear() clear(get_dict()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def pop(m): m.pop('t1') pop(get_dict()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def popitem(m): m.popitem() popitem(get_dict()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def update(m): m.update({'t1': constant_op.constant(3.)}) update(get_dict()) with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def setdefault(m): m.setdefault('t3', constant_op.constant(3.)) setdefault(get_dict()) def testFunctionModifiesInputNest(self): # Test on functions that modify structure of nested input arguments expected_msg = ( 'Function to be traced should not modify structure of input ' 'arguments. 
Check if your function has list and dictionary ' 'operations that alter input arguments, ' 'such as `list.pop`, `list.append`') with self.assertRaisesRegexp(ValueError, expected_msg): @def_function.function def modify(n): n[0]['t1'].append(constant_op.constant(1.)) nested_input = [{ 't1': [constant_op.constant(0.), constant_op.constant(1.)], }, constant_op.constant(2.)] modify(nested_input) with self.assertRaisesRegexp(ValueError, expected_msg): # The flat list doesn't change whereas the true structure changes @def_function.function def modify_same_flat(n): n[0].append(n[1].pop(0)) nested_input = [[constant_op.constant(0.)], [constant_op.constant(1.), constant_op.constant(2.)]] modify_same_flat(nested_input) def testDecoratedMethodVariableCleanup(self): m = DefunnedMiniModel() m(array_ops.ones([1, 2])) variable_refs = list({v.experimental_ref() for v in m.variables}) self.assertLen(variable_refs, 2) del m # Verifying if the variables are only referenced from variable_refs. # We expect the reference counter to be 1, but `sys.getrefcount` reports # one higher reference counter because a temporary is created when we call # sys.getrefcount(). Hence check if the number returned is 2. # https://docs.python.org/3/library/sys.html#sys.getrefcount self.assertEqual(sys.getrefcount(variable_refs[0].deref()), 2) self.assertEqual(sys.getrefcount(variable_refs[1].deref()), 2) def testExecutorType(self): @function.defun def add_five(x): return x + 5 self.assertEqual( 5, add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy()) with self.assertRaisesRegexp(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'): with context.function_executor_type('NON_EXISTENT_EXECUTOR'): add_five(constant_op.constant(0, dtype=dtypes.int32)) for executor_type in ('', 'DEFAULT', None): with context.function_executor_type(executor_type): self.assertAllEqual( 5, add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy()) @test_util.assert_no_garbage_created def testReferenceCycles(self): fn = function.defun(lambda x: 2. * x) fn(constant_op.constant(4.0)) weak_fn = weakref.ref(fn) del fn # Tests that the weak reference we made to the function is now dead, which # means the object has been deleted. This should be true as long as the # function itself is not involved in a reference cycle. self.assertIs(None, weak_fn()) def testFunctionStackInErrorMessage(self): if context.executing_eagerly(): # TODO(b/122736651): Remove this skipTest once fixed. 
self.skipTest('Error interpolation is not working when function is ' 'invoked without PartitionedCallOp.') @def_function.function() def fn3(x): return x + 2 @def_function.function() def fn2(x): check_ops.assert_equal(fn3(x), 3) return 2 @def_function.function() def fn(x): return fn2(x) with self.assertRaises(errors.InvalidArgumentError) as cm: fn(2) e = cm.exception self.assertIn('fn -> fn2', e.message) self.assertIn('node assert_equal/Assert/Assert (defined at', e.message) self.assertNotIn('fn3', e.message) @test_util.run_gpu_only def testFunctionIsNotPinned(self): """Tests that functions aren't pinned to the CPU by the eager runtime.""" seed1, seed2 = 79, 25 shape = constant_op.constant([4, 7]) dtype = dtypes.float32 @def_function.function def func(): with ops.device('GPU:0'): return gen_random_ops.random_standard_normal( shape, dtype=dtype, seed=seed1, seed2=seed2) with ops.device('GPU:0'): x = func() self.assertRegexpMatches(x.device, 'GPU') @test_util.run_in_graph_and_eager_modes def testShapeCaching(self): @function.defun def func(x): return array_ops.shape(x) @function.defun( input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)]) def calls_func(x): return func(x) self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1])))) self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2])))) self.assertAllEqual( [3, 3], self.evaluate(calls_func(array_ops.zeros([3, 3])))) def testLimitedRetracing(self): trace_count = [0] @function.defun def func(x): trace_count[0] += 1 return x for _ in range(50): func(constant_op.constant(3.)) func(constant_op.constant(4.)) func(constant_op.constant([[1., 2.]])) func(constant_op.constant([[]])) func(constant_op.constant([[3., 4.], [5., 6.]])) func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]])) # Tracing more than twice per input doesn't make sense. self.assertLess(trace_count[0], 13) def testLimitedRetracingWithCompositeTensors(self): trace_count = [0] @def_function.function def f(x): trace_count[0] += 1 return x for i in range(10): f(ragged_factory_ops.constant([[1, 2], [i]])) f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]])) f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])) self.assertEqual(trace_count[0], 3) def test_concrete_function_shape_mismatch(self): @def_function.function def f(argument_name): return argument_name + 1. f_concrete = f.get_concrete_function(constant_op.constant([1.])) # Calling a function from eager doesn't do any shape checking above what # kernels do while executing. self.assertAllEqual( [2., 3.], f_concrete(constant_op.constant([1., 2.])).numpy()) @def_function.function def g(): f_concrete(constant_op.constant([1., 2.])) with self.assertRaisesRegexp(ValueError, 'argument_name'): g() @test_util.run_in_graph_and_eager_modes def test_shape_inference_with_symbolic_shapes(self): @def_function.function def _uses_symbolic_shapes(w, x, y): x = array_ops.identity(x, name='name_collision') x = array_ops.transpose(x, [1, 0, 2]) x_batch = array_ops.shape(x)[0] y_batch = array_ops.shape(y)[0] y *= w n = y_batch // x_batch return array_ops.reshape(y, [n, x_batch, -1]) conc = _uses_symbolic_shapes.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32)) @def_function.function def _call_concrete(): c = constant_op.constant(1.) 
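      # Creating another op named 'name_collision' in the calling graph must
      # not disturb the symbolic shape information captured by `conc`.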
array_ops.identity(c, name='name_collision') output1 = conc(array_ops.ones([2]), array_ops.ones([5, 4, 2]), array_ops.ones([20, 2])) self.assertEqual([5, 4, 2], output1.shape) output2 = conc(array_ops.ones([3]), array_ops.ones([5, 4, 3]), array_ops.ones([40, 3])) self.assertEqual([10, 4, 3], output2.shape) return output1, output2 output1, output2 = _call_concrete() self.assertEqual((5, 4, 2), self.evaluate(output1).shape) self.assertEqual((10, 4, 3), self.evaluate(output2).shape) def testAutoGraphContext(self): @def_function.function def test_fn(): self.assertEqual( ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED) prev_status = ag_ctx.control_status_ctx().status test_fn() self.assertEqual(ag_ctx.control_status_ctx().status, prev_status) def testCancelBeforeFunctionExecution(self): if not context.executing_eagerly(): self.skipTest('eager only') q = data_flow_ops.FIFOQueue(1, dtypes.int32) @def_function.function def f(): return q.dequeue() c_mgr = cancellation.CancellationManager() cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function()) c_mgr.start_cancel() with self.assertRaises(errors.CancelledError): cancelable_func() def testCancelBlockedFunctionExecution(self): if not context.executing_eagerly(): self.skipTest('eager only') q = data_flow_ops.FIFOQueue(1, dtypes.int32) @def_function.function def f(): return q.dequeue() c_mgr = cancellation.CancellationManager() cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function()) def cancel_thread(): time.sleep(0.5) c_mgr.start_cancel() t = self.checkedThread(cancel_thread) t.start() with self.assertRaises(errors.CancelledError): cancelable_func() t.join() def testCancelAfterFunctionExecution(self): if not context.executing_eagerly(): self.skipTest('eager only') q = data_flow_ops.FIFOQueue(1, dtypes.int32) q.enqueue(37) @def_function.function def f(): return q.dequeue() c_mgr = cancellation.CancellationManager() cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function()) self.assertAllEqual(37, cancelable_func().numpy()) # Cancellation after the function executes is a no-op. c_mgr.start_cancel() class MultiDeviceTest(test.TestCase, parameterized.TestCase): @test_util.run_gpu_only def testMultiDeviceOutput(self): """Tests that functions can produce outputs on multiple devices.""" @function.defun def func(a, b, transpose_a): with ops.device('/device:CPU:0'): m1 = math_ops.matmul(a, b, transpose_a=transpose_a) with ops.device('/device:GPU:0'): m2 = math_ops.matmul(a, b, transpose_a=transpose_a) return m1, m2 t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) m1, m2 = func(t, t, transpose_a=True) self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]]) self.assertRegexpMatches(m1.backing_device, 'CPU') self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]]) self.assertRegexpMatches(m2.backing_device, 'GPU') @test_util.run_gpu_only def testEmptyBody(self): @function.defun def func(a, b): return b, a with ops.device('/device:CPU:0'): a = constant_op.constant(3.0) with ops.device('/device:GPU:0'): b = constant_op.constant(5.0) m1, m2 = func(a, b) self.assertAllEqual(m1.numpy(), 5.0) self.assertRegexpMatches(m1.backing_device, 'GPU') self.assertAllEqual(m2.numpy(), 3.0) self.assertRegexpMatches(m2.backing_device, 'CPU') @test_util.run_gpu_only def testMultiDeviceInt32(self): """Tests that multi-device functions can take and output INT32s. When an INT32 device tensor is fed into a function, it is copied to CPU by the eager runtime. The function sees all INT32 inputs on CPU. 
    We set the allocator attribute 'on_host' for INT32 outputs. They can be
    partitioned into the GPU component function, but will nevertheless be
    allocated on the CPU. There is now experimental support for
    `ints_on_device` in FunctionLibraryRuntime, which we could try.
    """
    with ops.device('/device:CPU:0'):
      int_cpu = constant_op.constant(3, dtype=dtypes.int32)
      resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)
    with ops.device('/device:GPU:0'):
      int_gpu = constant_op.constant(7, dtype=dtypes.int32)

    @function.defun
    def func(int_cpu, resource, int_gpu):
      with ops.device('/device:CPU:0'):
        m1 = int_cpu * resource + int_gpu
      with ops.device('/device:GPU:0'):
        # This computation will happen on GPU, but m2 will be copied to CPU.
        m2 = int_gpu * resource + int_cpu + 1
      return m1, m2

    m1, m2 = func(int_cpu, resource, int_gpu)
    self.assertAllEqual(m1.numpy(), 22)
    self.assertRegexpMatches(m1.backing_device, 'CPU')
    self.assertAllEqual(m2.numpy(), 39)
    self.assertRegexpMatches(m2.backing_device, 'CPU')

    # Flip the arguments.
    m1, m2 = func(int_gpu, resource, int_cpu)
    self.assertAllEqual(m1.numpy(), 38)
    self.assertRegexpMatches(m1.backing_device, 'CPU')
    self.assertAllEqual(m2.numpy(), 23)
    self.assertRegexpMatches(m2.backing_device, 'CPU')

  @test_util.run_gpu_only
  def testMultiDeviceColocateWith(self):
    """Tests that a function's outputs respect colocation constraints."""

    @function.defun
    def func(a, b):
      with ops.colocate_with(a):
        ra = 2 * a
      with ops.colocate_with(b):
        rb = 3 * b
      return ra, rb

    devices = ['/device:CPU:0', '/device:GPU:0']
    for dev1, dev2 in itertools.product(devices, devices):
      with ops.device(dev1):
        a = constant_op.constant(1.0)
      with ops.device(dev2):
        b = constant_op.constant(10.0)

      ra, rb = func(a, b)
      self.assertEqual(ra.numpy(), 2.0)
      self.assertRegexpMatches(ra.backing_device, dev1)
      self.assertEqual(rb.numpy(), 30.0)
      self.assertRegexpMatches(rb.backing_device, dev2)

  @test_util.run_gpu_only
  def testMultiDeviceResources(self):
    with ops.device('/device:CPU:0'):
      c1 = resource_variable_ops.ResourceVariable(2.0)
      c2 = resource_variable_ops.ResourceVariable(7.0)
    with ops.device('/device:GPU:0'):
      g1 = resource_variable_ops.ResourceVariable(3.0)
      g2 = resource_variable_ops.ResourceVariable(5.0)

    @function.defun
    def func(resource1, resource2):
      with ops.device('/device:CPU:0'):
        result1 = resource1 * g2
      with ops.device('/device:GPU:0'):
        result2 = resource2 * c2
      return result1, result2

    r1, r2 = func(c1, g1)
    self.assertEqual(r1.numpy(), 10.0)
    self.assertRegexpMatches(r1.backing_device, 'CPU')
    self.assertEqual(r2.numpy(), 21.0)
    self.assertRegexpMatches(r2.backing_device, 'GPU')

    # Call with flipped inputs. Check that we look at the resource's device
    # and reinstantiate the function when the inputs' devices change.
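    # With the inputs flipped: r1 = g1 * g2 = 3 * 5 = 15 (on CPU) and
    # r2 = c1 * c2 = 2 * 7 = 14 (on GPU).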
    r1, r2 = func(g1, c1)
    self.assertEqual(r1.numpy(), 15.0)
    self.assertRegexpMatches(r1.backing_device, 'CPU')
    self.assertEqual(r2.numpy(), 14.0)
    self.assertRegexpMatches(r2.backing_device, 'GPU')

  @test_util.run_gpu_only
  def testOutputResources(self):
    with ops.device('/device:CPU:0'):
      c1 = resource_variable_ops.ResourceVariable(2.0)
    with ops.device('/device:GPU:0'):
      g1 = resource_variable_ops.ResourceVariable(3.0)

    @function.defun
    def func(resource1, resource2):
      with ops.device('/device:CPU:0'):
        result1 = resource1 * 5
      with ops.device('/device:GPU:0'):
        result2 = resource2 * 7
      return result1, resource1.handle, result2, resource2.handle

    r1, res1, r2, res2 = func(c1, g1)
    self.assertEqual(r1.numpy(), 10.0)
    self.assertRegexpMatches(r1.backing_device, 'CPU')
    self.assertEqual(r2.numpy(), 21.0)
    self.assertRegexpMatches(r2.backing_device, 'GPU')

    def check_handle(handle, expected_value):
      self.assertRegexpMatches(handle.backing_device, 'CPU')
      tensor = gen_resource_variable_ops.read_variable_op(
          handle, dtypes.float32)
      self.assertEqual(tensor.numpy(), expected_value)

    # Check that handles returned from functions are on CPU and that an op
    # using the resource handle is correctly placed on the device backing the
    # resource.
    check_handle(res1, 2.0)
    check_handle(res2, 3.0)

    # Call with flipped inputs to make sure the function is reinstantiated and
    # the eager runtime does not mess up the device assignment for ops
    # consuming handles returned from defuns.
    r1, res1, r2, res2 = func(g1, c1)
    self.assertEqual(r1.numpy(), 15.0)
    self.assertRegexpMatches(r1.backing_device, 'CPU')
    self.assertEqual(r2.numpy(), 14.0)
    self.assertRegexpMatches(r2.backing_device, 'GPU')
    check_handle(res1, 3.0)
    check_handle(res2, 2.0)

  @test_util.run_gpu_only
  def testPassResourceThroughNestedFunctionCall(self):
    """Test passing a GPU resource to a noinline function call placed on CPU.

    PartitionedCallOp must not enforce any particular device assignment for
    the resource output. The inner function is marked as `_nospecialize`, so
    Grappler will not prune the unused function output.
    """

    with ops.device('/device:GPU:0'):
      g1 = resource_variable_ops.ResourceVariable(3.0)

    @function.defun_with_attributes(attributes={
        '_noinline': True,
        '_nospecialize': True
    })
    def inner(resource1):
      return resource1 * 2, resource1.handle

    @function.defun
    def outer(resource1):
      with ops.device('/device:CPU:0'):
        r1, _ = inner(resource1)
      return r1

    r1 = outer(g1)

    self.assertEqual(r1.numpy(), 6.0)
    self.assertRegexpMatches(r1.backing_device, 'CPU')

  @test_util.run_gpu_only
  def testReturnResourceFromNestedFunctionCall(self):
    """Test returning a GPU resource from a noinline call placed on CPU.

    When inferring output devices for the return value, do not set a device
    for returns of DT_RESOURCE data type based on the device assignment of
    the node that produced that resource. As an example, a function call
    placed on CPU can return resources on GPU.
""" with ops.device('/device:GPU:0'): g1 = resource_variable_ops.ResourceVariable(3.0) @function.defun_with_attributes(attributes={ '_noinline': True }) def inner(resource1): resource1.assign_add(2.0) return resource1 * 2, resource1.handle @function.defun def outer(resource1): with ops.device('/device:CPU:0'): r1, res1 = inner(resource1) return r1, res1 r1, res1 = outer(g1) self.assertEqual(r1.numpy(), 10.0) self.assertRegexpMatches(r1.backing_device, 'CPU') def check_handle(handle, expected_value): self.assertRegexpMatches(handle.backing_device, 'CPU') tensor = gen_resource_variable_ops.read_variable_op( handle, dtypes.float32) self.assertEqual(tensor.numpy(), expected_value) # Check that handles returned from functions are on CPU and an op using # the resource handle is correctly placed on the device backing the # resource. check_handle(res1, 5.0) @test_util.run_gpu_only def testComplexInputOutputDevicePattern(self): """Tests input/output mapping logic in partitioning.""" with ops.device('/device:CPU:0'): rc0 = resource_variable_ops.ResourceVariable(2.0) rc1 = resource_variable_ops.ResourceVariable(3.0) cc0 = constant_op.constant(5.0) cc1 = constant_op.constant(7.0) with ops.device('/device:GPU:0'): rg0 = resource_variable_ops.ResourceVariable(11.0) rg1 = resource_variable_ops.ResourceVariable(13.0) cg0 = constant_op.constant(17.0) cg1 = constant_op.constant(19.0) # Make sure tensors are on expected devices. for tensor in [cc0, cc1]: self.assertRegexpMatches(tensor.backing_device, 'CPU:0') for tensor in [cg0, cg1]: self.assertRegexpMatches(tensor.backing_device, 'GPU:0') @function.defun def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1): with ops.device('/device:CPU:0'): m1 = rc0 * cg0 with ops.device('/device:GPU:0'): m2 = rg0 * cc0 with ops.device('/device:CPU:0'): r1 = 1000.0 * m2 + rc1 * cg1 with ops.device('/device:GPU:0'): r2 = 1000.0 * m1 + rg1 * cc1 return r1, r2, m2, m1 r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1) self.assertRegexpMatches(m1.backing_device, 'CPU') self.assertRegexpMatches(r1.backing_device, 'CPU') self.assertRegexpMatches(m2.backing_device, 'GPU') self.assertRegexpMatches(r2.backing_device, 'GPU') self.assertEqual(m1.numpy(), 34.0) self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0) self.assertEqual(m2.numpy(), 55.0) self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0) @test_util.run_gpu_only def testArgumentPrunning(self): """Tests functions taking unnecessary arguments.""" with ops.device('/device:CPU:0'): c1 = constant_op.constant(5.0) c2 = constant_op.constant(7.0) with ops.device('/device:GPU:0'): g1 = constant_op.constant(11.0) g2 = constant_op.constant(13.0) g3 = constant_op.constant(17.0) @function.defun def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument # arguments g1 and g2 are unused and can be pruned by grappler. return c1 * g3 * c2 result = func(g1, g2, c1, g3, c2) self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0) def testNestedCallWatchedVariables(self): v = variables.Variable(4.) @def_function.function def f(): return v ** 2. with backprop.GradientTape() as tape: f() self.assertEqual((v,), tape.watched_variables()) @def_function.function def g(): return f() with backprop.GradientTape() as tape: g() self.assertEqual((v,), tape.watched_variables()) # f() can rely on the variable being read during its trace. g() checks that # variables from a function which knows about them are recorded on the # tape. h() tests that functions forward knowledge of variables to callers. 
@def_function.function def h(): return g() with backprop.GradientTape() as tape: h() self.assertEqual((v,), tape.watched_variables()) def testStandardTrainingLoopInFunction(self): layer = core.Dense(2) dataset = ( dataset_ops.DatasetV2.from_tensors( (array_ops.ones([784]), array_ops.ones([], dtypes.int32))) .map(lambda x, y: (x, y)) .repeat(10) .batch(32)) optimizer = adam.Adam() @def_function.function def train(): for x, y in dataset: with backprop.GradientTape() as tape: out = layer(x) loss = math_ops.reduce_mean( nn_ops.sparse_softmax_cross_entropy_with_logits( logits=out, labels=y)) layer_variables = layer.trainable_variables gradients = tape.gradient(loss, layer_variables) optimizer.apply_gradients(zip(gradients, layer_variables)) train() def testEarlyStoppingTrainingLoopInFunction(self): layer = core.Dense(2) dataset = ( dataset_ops.DatasetV2.from_tensors( (array_ops.ones([784]), array_ops.ones([], dtypes.int32))) .map(lambda x, y: (x, y)) .repeat(10) .batch(32)) optimizer = adam.Adam() @def_function.function def train(): for x, y in dataset: with backprop.GradientTape() as tape: out = layer(x) loss = math_ops.reduce_mean( nn_ops.sparse_softmax_cross_entropy_with_logits( logits=out, labels=y)) layer_variables = layer.trainable_variables gradients = tape.gradient(loss, layer_variables) optimizer.apply_gradients(zip(gradients, layer_variables)) if optimizer.iterations > 3: break train() def testDeferredCapture(self): value = 1.0 @def_function.function def lazy_capture(x): y = ops.get_default_graph().capture_call_time_value( lambda: value, tensor_spec.TensorSpec(None)) return x + y self.assertAllEqual(lazy_capture(2.0), 3.0) # After changing the value of `value` the function call should return a # different result. value = 2.0 self.assertAllEqual(lazy_capture(2.0), 4.0) def testDeferredCaptureWithKey(self): value0 = 1.0 value1 = 2.0 @def_function.function def lazy_capture(x): w = ops.get_default_graph().capture_call_time_value( lambda: value0, tensor_spec.TensorSpec(None), key=0) y = ops.get_default_graph().capture_call_time_value( lambda: value1, tensor_spec.TensorSpec(None), key=1) def bad_closure(): raise ValueError('Should not run') z = ops.get_default_graph().capture_call_time_value( bad_closure, tensor_spec.TensorSpec(None), key=1) return x + y + w + z self.assertAllEqual(lazy_capture(2.0), 7.0) value0 = 2.0 value1 = 3.0 self.assertAllEqual(lazy_capture(2.0), 10.0) def testDeferredCaptureTypeError(self): value = constant_op.constant(1.0) @def_function.function def lazy_capture(x): y = ops.get_default_graph().capture_call_time_value( lambda: value, tensor_spec.TensorSpec(())) return x + y self.assertAllEqual(lazy_capture(2.0), 3.0) # dtype mismatch value = constant_op.constant(1) with self.assertRaisesRegexp(ValueError, 'Value .* to a tensor with dtype'): lazy_capture(2.0) # shape mismatch value = constant_op.constant([1.0]) with self.assertRaisesRegexp(ValueError, 'Value .* shape'): lazy_capture(2.0) def testDeferredCaptureReturnNestWithCompositeTensor(self): i_s = indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1], dtype=dtypes.int64), constant_op.constant([2])) r_t = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]) s_t = sparse_tensor.SparseTensor( values=[1, 2, 3], indices=[[0], [8], [10]], dense_shape=[20]) @def_function.function def lazy_capture(): y = ops.get_default_graph().capture_call_time_value( lambda: {'i': i_s, 't': (r_t, s_t)}, {'i': indexed_slices.IndexedSlicesSpec( dtype=dtypes.int32, dense_shape_dtype=dtypes.int32), 
't': (ragged_tensor.RaggedTensorSpec([2, None, None], dtypes.int32), sparse_tensor.SparseTensorSpec([None], dtypes.int32))}) return y['i'], y['t'] i, (r, s) = lazy_capture() self.assertAllEqual(i_s.values, i.values) self.assertAllEqual(i_s.indices, i.indices) self.assertAllEqual(i_s.dense_shape, i.dense_shape) self.assertAllEqual(r_t, r) self.assertAllEqual(s_t.indices, s.indices) self.assertAllEqual(s_t.values, s.values) self.assertAllEqual(s_t.dense_shape, s.dense_shape) def testDeferredCaptureCompositeTensorSpecTypeMismatch(self): value = indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1], dtype=dtypes.int64)) @def_function.function def lazy_capture(): return ops.get_default_graph().capture_call_time_value( lambda: value, indexed_slices.IndexedSlicesSpec(dtype=dtypes.int32)) # Type matches spec. lazy_capture() # Extra dense shape component. value = indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1], dtype=dtypes.int64), constant_op.constant([2])) with self.assertRaises(ValueError): lazy_capture() # Index dtype mismatch int32 vs. int64. value = indexed_slices.IndexedSlices( constant_op.constant([1, 2]), constant_op.constant([0, 1])) with self.assertRaises(ValueError): lazy_capture() if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/function_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for eager execution_callbacks.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import execution_callbacks from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test RAISE = execution_callbacks.ExecutionCallback.RAISE IGNORE = execution_callbacks.ExecutionCallback.IGNORE def log_zero(): """Computes `log(0.0)`.""" return math_ops.log(constant_op.constant(0.)) class ExecutionCallbacksTest(test.TestCase): def test_errstate_inf_raise(self): with execution_callbacks.errstate(inf_or_nan=RAISE): with self.assertRaises(execution_callbacks.InfOrNanError): log_zero() def test_errstate_inf_ignore(self): with execution_callbacks.errstate(inf_or_nan=IGNORE): self.assertEqual(-float("inf"), log_zero().numpy()) def test_errstate_nesting(self): with execution_callbacks.errstate(inf_or_nan=RAISE): with execution_callbacks.errstate(inf_or_nan=IGNORE): self.assertEqual(-float("inf"), log_zero().numpy()) with self.assertRaises(execution_callbacks.InfOrNanError): log_zero() if __name__ == "__main__": ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/execution_callbacks_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers to connect to remote servers."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.distribute.cluster_resolver import cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.platform import remote_utils
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export


_GRPC_PREFIX = "grpc://"


@tf_export("config.experimental_connect_to_host")
def connect_to_remote_host(remote_host=None, job_name="worker"):
  """Connects to a single machine to enable remote execution on it.

  Will make devices on the remote host available to use. Note that calling
  this more than once will work, but will invalidate any tensor handles on
  the old remote devices.

  Using the default job_name of worker, you can schedule ops to run remotely
  as follows:
  ```python
  # Enable eager execution, and connect to the remote host.
  tf.compat.v1.enable_eager_execution()
  tf.contrib.eager.connect_to_remote_host("exampleaddr.com:9876")

  with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
    # The following tensors should be resident on the remote device, and the
    # op will also execute remotely.
    x1 = array_ops.ones([2, 2])
    x2 = array_ops.ones([2, 2])
    y = math_ops.matmul(x1, x2)
  ```

  Args:
    remote_host: a single remote server address, or a list of addresses, in
      host-port format.
    job_name: The job name under which the new server will be accessible.

  Raises:
    ValueError: if remote_host is None.
  """
  if not remote_host:
    raise ValueError("Must provide at least one remote_host")

  remote_hosts = nest.flatten(remote_host)
  cluster_spec = server_lib.ClusterSpec(
      {job_name: [_strip_prefix(host, _GRPC_PREFIX) for host in remote_hosts]})

  connect_to_cluster(cluster_spec)


@tf_export("config.experimental_connect_to_cluster")
def connect_to_cluster(cluster_spec_or_resolver,
                       job_name="localhost",
                       task_index=0,
                       protocol=None):
  """Connects to the given cluster.

  Will make devices on the cluster available to use. Note that calling this
  more than once will work, but will invalidate any tensor handles on the old
  remote devices.

  If the given local job name is not present in the cluster specification, it
  will be automatically added, using an unused port on the localhost.

  Args:
    cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing
      the cluster.
    job_name: The name of the local job.
    task_index: The local task index.
    protocol: The communication protocol, such as `"grpc"`. If unspecified,
      will use the default from `python/platform/remote_utils.py`.
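
  Example, assuming a two-worker cluster is reachable at the (illustrative)
  addresses below:
  ```python
  cluster = tf.train.ClusterSpec(
      {"worker": ["10.0.0.1:8470", "10.0.0.2:8470"]})
  tf.config.experimental_connect_to_cluster(cluster)
  ```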
""" protocol = protocol or remote_utils.get_default_communication_protocol() if isinstance(cluster_spec_or_resolver, server_lib.ClusterSpec): cluster_spec = cluster_spec_or_resolver elif isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver): cluster_spec = cluster_spec_or_resolver.cluster_spec() else: raise ValueError( "`cluster_spec_or_resolver` must be a `ClusterSpec` or a " "`ClusterResolver`.") cluster_def = cluster_spec.as_cluster_def() # Automatically add local job, if not part of the cluster spec. if job_name not in cluster_spec.jobs: local_port = pywrap_tensorflow.TF_PickUnusedPortOrDie() job_def = cluster_def.job.add() job_def.name = job_name # TODO(fishx): Update this to make sure remote worker has valid ip address # to connect with local. job_def.tasks[0] = "localhost:{}".format(local_port) server_def = ServerDef( cluster=cluster_def, job_name=job_name, task_index=task_index, protocol=protocol) # TODO(nareshmodi): Make this default since it works in more situations. os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1" context.set_server_def(server_def) def _strip_prefix(s, prefix): return s[len(prefix):] if s.startswith(prefix) else s
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/remote.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for monitoring.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import monitoring from tensorflow.python.eager import test from tensorflow.python.framework import errors from tensorflow.python.framework import test_util class MonitoringTest(test_util.TensorFlowTestCase): def test_counter(self): counter = monitoring.Counter('test/counter', 'test counter') counter.get_cell().increase_by(1) self.assertEqual(counter.get_cell().value(), 1) counter.get_cell().increase_by(5) self.assertEqual(counter.get_cell().value(), 6) def test_multiple_counters(self): counter1 = monitoring.Counter('test/counter1', 'test counter', 'label1') counter1.get_cell('foo').increase_by(1) self.assertEqual(counter1.get_cell('foo').value(), 1) counter2 = monitoring.Counter('test/counter2', 'test counter', 'label1', 'label2') counter2.get_cell('foo', 'bar').increase_by(5) self.assertEqual(counter2.get_cell('foo', 'bar').value(), 5) def test_same_counter(self): counter1 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable with self.assertRaises(errors.AlreadyExistsError): counter2 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable def test_int_gauge(self): gauge = monitoring.IntGauge('test/gauge', 'test gauge') gauge.get_cell().set(1) self.assertEqual(gauge.get_cell().value(), 1) gauge.get_cell().set(5) self.assertEqual(gauge.get_cell().value(), 5) gauge1 = monitoring.IntGauge('test/gauge1', 'test gauge1', 'label1') gauge1.get_cell('foo').set(2) self.assertEqual(gauge1.get_cell('foo').value(), 2) def test_string_gauge(self): gauge = monitoring.StringGauge('test/gauge', 'test gauge') gauge.get_cell().set('left') self.assertEqual(gauge.get_cell().value(), 'left') gauge.get_cell().set('right') self.assertEqual(gauge.get_cell().value(), 'right') gauge1 = monitoring.StringGauge('test/gauge1', 'test gauge1', 'label1') gauge1.get_cell('foo').set('start') self.assertEqual(gauge1.get_cell('foo').value(), 'start') def test_bool_gauge(self): gauge = monitoring.BoolGauge('test/gauge', 'test gauge') gauge.get_cell().set(True) self.assertTrue(gauge.get_cell().value()) gauge.get_cell().set(False) self.assertFalse(gauge.get_cell().value()) gauge1 = monitoring.BoolGauge('test/gauge1', 'test gauge1', 'label1') gauge1.get_cell('foo').set(True) self.assertTrue(gauge1.get_cell('foo').value()) def test_sampler(self): buckets = monitoring.ExponentialBuckets(1.0, 2.0, 2) sampler = monitoring.Sampler('test/sampler', buckets, 'test sampler') sampler.get_cell().add(1.0) sampler.get_cell().add(5.0) histogram_proto = sampler.get_cell().value() self.assertEqual(histogram_proto.min, 1.0) self.assertEqual(histogram_proto.num, 2.0) self.assertEqual(histogram_proto.sum, 6.0) sampler1 = 
monitoring.Sampler('test/sampler1', buckets, 'test sampler', 'label1') sampler1.get_cell('foo').add(2.0) sampler1.get_cell('foo').add(4.0) sampler1.get_cell('bar').add(8.0) histogram_proto1 = sampler1.get_cell('foo').value() self.assertEqual(histogram_proto1.max, 4.0) self.assertEqual(histogram_proto1.num, 2.0) self.assertEqual(histogram_proto1.sum, 6.0) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/monitoring_test.py
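The counter behaviour exercised in the tests above is compact enough to show standalone. A minimal sketch, assuming only the in-tree `monitoring` module; the metric name `sketch/request_count` is illustrative, not from the test file:

# Minimal usage sketch (hypothetical metric name). Each distinct label tuple
# owns its own cell, and increase_by() accumulates per cell.
from tensorflow.python.eager import monitoring

request_count = monitoring.Counter('sketch/request_count',
                                   'Example counter', 'method')
request_count.get_cell('GET').increase_by(1)
request_count.get_cell('GET').increase_by(2)
request_count.get_cell('POST').increase_by(5)
assert request_count.get_cell('GET').value() == 3
assert request_count.get_cell('POST').value() == 5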
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """TensorFlow monitoring APIs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.core.framework import summary_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.framework import c_api_util from tensorflow.python.util import compat _MetricMethod = collections.namedtuple('MetricMethod', 'create delete get_cell') _counter_methods = [ _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewCounter0, delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter0, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter0), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewCounter1, delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter1, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter1), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewCounter2, delete=pywrap_tensorflow.TFE_MonitoringDeleteCounter2, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellCounter2), ] _int_gauge_methods = [ _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewIntGauge0, delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge0, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge0), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewIntGauge1, delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge1, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge1), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewIntGauge2, delete=pywrap_tensorflow.TFE_MonitoringDeleteIntGauge2, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellIntGauge2), ] _string_gauge_methods = [ _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewStringGauge0, delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge0, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge0), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewStringGauge1, delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge1, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge1), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewStringGauge2, delete=pywrap_tensorflow.TFE_MonitoringDeleteStringGauge2, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellStringGauge2), ] _bool_gauge_methods = [ _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge0, delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge0, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge0), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge1, delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge1, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge1), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewBoolGauge2, delete=pywrap_tensorflow.TFE_MonitoringDeleteBoolGauge2, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellBoolGauge2), ] _sampler_methods = [ _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewSampler0, 
delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler0, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler0), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewSampler1, delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler1, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler1), _MetricMethod( create=pywrap_tensorflow.TFE_MonitoringNewSampler2, delete=pywrap_tensorflow.TFE_MonitoringDeleteSampler2, get_cell=pywrap_tensorflow.TFE_MonitoringGetCellSampler2), ] class Metric(object): """The base class of metrics.""" def __init__(self, metric_name, metric_methods, label_length, *args): """Creates a new metric. Args: metric_name: name of the metric class. metric_methods: list of SWIG metric methods. label_length: length of label args. *args: the arguments passed to the create method. """ self._metric_name = metric_name self._metric_methods = metric_methods self._label_length = label_length if label_length >= len(self._metric_methods): raise ValueError('Cannot create {} metric with {} or more labels'.format( self._metric_name, len(self._metric_methods))) self._metric = self._metric_methods[self._label_length].create(*args) def __del__(self): try: deleter = self._metric_methods[self._label_length].delete metric = self._metric except AttributeError: return if deleter is not None: deleter(metric) def get_cell(self, *labels): """Retrieves the cell.""" if len(labels) != self._label_length: raise ValueError('The {} expects {} labels'.format( self._metric_name, self._label_length)) return self._metric_methods[self._label_length].get_cell( self._metric, *labels) class CounterCell(object): """CounterCell stores each value of a Counter.""" def __init__(self, cell): """Creates a new CounterCell. Args: cell: A C pointer to TFE_MonitoringCounterCell. """ self._cell = cell def increase_by(self, value): """Atomically increments the value. Args: value: non-negative value. """ pywrap_tensorflow.TFE_MonitoringCounterCellIncrementBy(self._cell, value) def value(self): """Retrieves the current value.""" return pywrap_tensorflow.TFE_MonitoringCounterCellValue(self._cell) class Counter(Metric): """A stateful class for updating a cumulative integer metric. This class encapsulates a set of values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to increment each value. """ def __init__(self, name, description, *labels): """Creates a new Counter. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric. """ super(Counter, self).__init__('Counter', _counter_methods, len(labels), name, description, *labels) def get_cell(self, *labels): """Retrieves the cell.""" return CounterCell(super(Counter, self).get_cell(*labels)) class IntGaugeCell(object): """A single integer value stored in an `IntGauge`.""" def __init__(self, cell): """Creates a new IntGaugeCell. Args: cell: A C pointer to TFE_MonitoringIntGaugeCell. """ self._cell = cell def set(self, value): """Atomically sets the value. Args: value: integer value. """ pywrap_tensorflow.TFE_MonitoringIntGaugeCellSet(self._cell, value) def value(self): """Retrieves the current value.""" return pywrap_tensorflow.TFE_MonitoringIntGaugeCellValue(self._cell) class IntGauge(Metric): """A stateful class for updating a gauge-like integer metric. This class encapsulates a set of integer values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value.
""" def __init__(self, name, description, *labels): """Creates a new IntGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric. """ super(IntGauge, self).__init__('IntGauge', _int_gauge_methods, len(labels), name, description, *labels) def get_cell(self, *labels): """Retrieves the cell.""" return IntGaugeCell(super(IntGauge, self).get_cell(*labels)) class StringGaugeCell(object): """A single string value stored in a `StringGauge`.""" def __init__(self, cell): """Creates a new StringGaugeCell. Args: cell: A C pointer to TFE_MonitoringStringGaugeCell. """ self._cell = cell def set(self, value): """Atomically sets the value. Args: value: string value. """ pywrap_tensorflow.TFE_MonitoringStringGaugeCellSet(self._cell, value) def value(self): """Retrieves the current value.""" with c_api_util.tf_buffer() as buffer_: pywrap_tensorflow.TFE_MonitoringStringGaugeCellValue(self._cell, buffer_) value = pywrap_tensorflow.TF_GetBuffer(buffer_).decode('utf-8') return value class StringGauge(Metric): """A stateful class for updating a gauge-like string metric. This class encapsulates a set of string values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value. """ def __init__(self, name, description, *labels): """Creates a new StringGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric. """ super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, len(labels), name, description, *labels) def get_cell(self, *labels): """Retrieves the cell.""" return StringGaugeCell(super(StringGauge, self).get_cell(*labels)) class BoolGaugeCell(object): """A single boolean value stored in a `BoolGauge`.""" def __init__(self, cell): """Creates a new BoolGaugeCell. Args: cell: A C pointer to TFE_MonitoringBoolGaugeCell. """ self._cell = cell def set(self, value): """Atomically sets the value. Args: value: bool value. """ pywrap_tensorflow.TFE_MonitoringBoolGaugeCellSet(self._cell, value) def value(self): """Retrieves the current value.""" return pywrap_tensorflow.TFE_MonitoringBoolGaugeCellValue(self._cell) class BoolGauge(Metric): """A stateful class for updating a gauge-like bool metric. This class encapsulates a set of boolean values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value. """ def __init__(self, name, description, *labels): """Creates a new BoolGauge. Args: name: name of the new metric. description: description of the new metric. *labels: The label list of the new metric. """ super(BoolGauge, self).__init__('BoolGauge', _bool_gauge_methods, len(labels), name, description, *labels) def get_cell(self, *labels): """Retrieves the cell.""" return BoolGaugeCell(super(BoolGauge, self).get_cell(*labels)) class SamplerCell(object): """SamplerCell stores each value of a Sampler.""" def __init__(self, cell): """Creates a new SamplerCell. Args: cell: A C pointer to TFE_MonitoringSamplerCell. """ self._cell = cell def add(self, value): """Atomically adds a sample. Args: value: float value. """ pywrap_tensorflow.TFE_MonitoringSamplerCellAdd(self._cell, value) def value(self): """Retrieves the current distribution of samples. Returns: A HistogramProto describing the distribution of samples.
""" with c_api_util.tf_buffer() as buffer_: pywrap_tensorflow.TFE_MonitoringSamplerCellValue(self._cell, buffer_) proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_) histogram_proto = summary_pb2.HistogramProto() histogram_proto.ParseFromString(compat.as_bytes(proto_data)) return histogram_proto class Buckets(object): """Bucketing strategies for the samplers.""" def __init__(self, buckets): """Creates a new Buckets. Args: buckets: A C pointer to TFE_MonitoringBuckets. """ self.buckets = buckets def __del__(self): pywrap_tensorflow.TFE_MonitoringDeleteBuckets(self.buckets) class ExponentialBuckets(Buckets): """Exponential bucketing strategy. Sets up buckets of the form: [-DBL_MAX, ..., scale * growth_factor^i, scale * growth_factor^(i + 1), ..., DBL_MAX]. """ def __init__(self, scale, growth_factor, bucket_count): """Creates a new set of exponential buckets. Args: scale: float growth_factor: float bucket_count: integer """ super(ExponentialBuckets, self).__init__( pywrap_tensorflow.TFE_MonitoringNewExponentialBuckets( scale, growth_factor, bucket_count)) class Sampler(Metric): """A stateful class for updating a cumulative histogram metric. This class encapsulates a set of histograms (or a single histogram for a label-less metric) configured with a list of increasing bucket boundaries. Each histogram is identified by a tuple of labels. The class allows the user to add a sample to each histogram value. """ def __init__(self, name, buckets, description, *labels): """Creates a new Sampler. Args: name: name of the new metric. buckets: bucketing strategy of the new metric. description: description of the new metric. *labels: The label list of the new metric. """ super(Sampler, self).__init__('Sampler', _sampler_methods, len(labels), name, buckets.buckets, description, *labels) def get_cell(self, *labels): """Retrieves the cell.""" return SamplerCell(super(Sampler, self).get_cell(*labels))
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/monitoring.py
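Of the metric kinds defined above, the sampler is the least obvious to drive. A short sketch of the `ExponentialBuckets`/`Sampler` pairing, with an illustrative metric name:

# ExponentialBuckets(scale=1.0, growth_factor=2.0, bucket_count=2) yields
# boundaries at 1.0 and 2.0; value() returns a summary_pb2.HistogramProto.
from tensorflow.python.eager import monitoring

buckets = monitoring.ExponentialBuckets(1.0, 2.0, 2)
latency = monitoring.Sampler('sketch/latency', buckets, 'Example sampler')
latency.get_cell().add(0.5)
latency.get_cell().add(3.0)
histogram = latency.get_cell().value()
print(histogram.num, histogram.sum)  # 2.0 3.5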
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Cancellation support for eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow class CancellationManager(object): """A mechanism for cancelling blocking computation.""" def __init__(self): self._impl = pywrap_tensorflow.TFE_NewCancellationManager() @property def is_cancelled(self): """Returns `True` if `CancellationManager.start_cancel` has been called.""" return pywrap_tensorflow.TFE_CancellationManagerIsCancelled(self._impl) def start_cancel(self): """Cancels blocking operations that have been registered with this object.""" pywrap_tensorflow.TFE_CancellationManagerStartCancel(self._impl) def get_cancelable_function(self, concrete_function): # pylint: disable=protected-access return concrete_function._experimental_with_cancellation_manager(self) def __del__(self): pywrap_tensorflow.TFE_DeleteCancellationManager(self._impl)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/cancellation.py
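How the manager above is meant to be wired in is easiest to see with a concrete function. A hypothetical sketch: the function body and the timer thread are illustrative (a real use would wrap an op that actually blocks, such as a queue read), and only `get_cancelable_function`/`start_cancel` come from the module itself:

import threading

from tensorflow.python.eager import cancellation
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec

@def_function.function(
    input_signature=[tensor_spec.TensorSpec([], dtypes.float32)])
def slow_fn(x):
  return x + 1.  # stands in for a computation that may block

manager = cancellation.CancellationManager()
cancelable = manager.get_cancelable_function(slow_fn.get_concrete_function())
# Another thread may cancel all registered blocking ops at any point.
threading.Timer(5.0, manager.start_cancel).start()
result = cancelable(constant_op.constant(1.))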
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.core.protobuf import config_pb2 from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test @parameterized.named_parameters( dict(testcase_name='Defun', function_decorator=function.defun), dict(testcase_name='DefFunction', function_decorator=def_function.function)) class ArgumentNamingTests(test.TestCase, parameterized.TestCase): """Tests for recognizable export signatures from concrete functions.""" def testBasic(self, function_decorator): @function_decorator def fn(a, b): return a + b, a * b # Call the function to make def_function happy fn(array_ops.ones([]), array_ops.ones([])) fn_op = fn.get_concrete_function( tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)) self.assertEqual( ['a', 'b'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'a', b'b'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs]) self.assertEqual(2, len(fn_op.graph.structured_outputs)) self.assertAllClose( [3., 2.], fn_op(constant_op.constant(1.), constant_op.constant(2.))) self.assertAllClose( [3., 2.], fn_op(a=constant_op.constant(1.), b=constant_op.constant(2.))) def testVariable(self, function_decorator): @function_decorator def fn(a, b): return a + b, a * b # Call the function to make def_function happy fn(array_ops.ones([]), array_ops.ones([])) fn_op = fn.get_concrete_function( tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32), variables.Variable(1.)) self.assertEqual( ['a', 'b'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'a', b'b'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs]) self.assertEqual(2, len(fn_op.graph.structured_outputs)) def testDictReturned(self, function_decorator): @function_decorator def fn(x, z=(1., 2.), y=3.): z1, z2 = z return {'alpha': x + y + z1, 'beta': x * y + z2} # Call the function to make def_function happy fn(array_ops.ones([])) fn_op = fn.get_concrete_function( x=tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32), y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)) self.assertEqual( ['x', 'y'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'x', b'y'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs]) self.assertEqual({'alpha', 'beta'}, 
set(fn_op.graph.structured_outputs.keys())) fn_op2 = fn.get_concrete_function( z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32, name='z_first'), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z_second')), y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'), x=4.) self.assertEqual( ['z_first', 'z_second', 'custom'], [inp.op.name for inp in fn_op2.inputs]) self.assertEqual( [b'z_first', b'z_second', b'custom'], [inp.op.get_attr('_user_specified_name') for inp in fn_op2.inputs]) fn_op3 = fn.get_concrete_function( tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'), z=(tensor_spec.TensorSpec(shape=(None,), dtype=dtypes.float32, name='z1'), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z2')), y=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)) self.assertEqual( ['custom', 'z1', 'z2', 'y'], [inp.op.name for inp in fn_op3.inputs]) self.assertEqual( [b'custom', b'z1', b'z2', b'y'], [inp.op.get_attr('_user_specified_name') for inp in fn_op3.inputs]) def testMethod(self, function_decorator): class HasMethod(object): @function_decorator def method(self, x): return x has_method = HasMethod() # Call the function to make def_function happy HasMethod.method(has_method, array_ops.ones([])) class_op = HasMethod.method.get_concrete_function( has_method, tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)) self.assertEqual( ['x'], [inp.op.name for inp in class_op.inputs]) self.assertEqual( [b'x'], [inp.op.get_attr('_user_specified_name') for inp in class_op.inputs]) # Call the function to make def_function happy has_method.method(array_ops.ones([])) method_op = has_method.method.get_concrete_function( tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)) self.assertEqual( ['x'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'x'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs]) # TODO(allenl): It should be possible to override names when exporting. Do # TensorSpec names need to go in cache keys? Or maybe get_concrete_function # should always retrace? 
self.skipTest('Not working') method_op = has_method.method.get_concrete_function( tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='y')) self.assertEqual( ['y'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs]) def testMethodSignature(self, function_decorator): class HasMethod(object): @function_decorator( input_signature=(tensor_spec.TensorSpec( shape=None, dtype=dtypes.float64, name='y'),)) def method(self, x): hash(self) # No weak proxies passed as `self` return x has_method = HasMethod() # Call the function to make def_function happy has_method.method(array_ops.ones([], dtype=dtypes.float64)) method_op = has_method.method.get_concrete_function() self.assertEqual( ['y'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs]) method_op2 = has_method.method.get_concrete_function() self.assertEqual( ['y'], [inp.op.name for inp in method_op2.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs]) def testVariadic(self, function_decorator): @function_decorator def variadic_fn(x, *args, **kwargs): return x + math_ops.add_n(list(args) + list(kwargs.values())) # Call the function to make def_function happy variadic_fn(array_ops.ones([]), array_ops.ones([])) variadic_op = variadic_fn.get_concrete_function( tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32), tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='second_variadic'), z=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32), zz=tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='cust')) self.assertEqual( ['x', 'y', 'args_1', 'second_variadic', 'z', 'cust'], [inp.op.name for inp in variadic_op.inputs]) self.assertEqual( [b'x', b'y', b'args_1', b'second_variadic', b'z', b'cust'], [inp.op.get_attr('_user_specified_name') for inp in variadic_op.inputs]) def testVariadicInputSignature(self, function_decorator): @function_decorator( input_signature=( tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32), tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32, name='y'), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32), tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32, name='z'), )) def variadic_fn(x, *args): return x + math_ops.add_n(list(args)) # Call the function to make def_function happy variadic_fn(array_ops.ones([]), array_ops.ones([]), array_ops.ones([]), array_ops.ones([])) variadic_op = variadic_fn.get_concrete_function() self.assertIn(b'variadic_fn', variadic_op.name) self.assertEqual( ['x', 'y', 'args_1', 'z'], [inp.op.name for inp in variadic_op.inputs]) self.assertEqual( [b'x', b'y', b'args_1', b'z'], [inp.op.get_attr('_user_specified_name') for inp in variadic_op.inputs]) if __name__ == '__main__': ops.enable_eager_execution( config=config_pb2.ConfigProto(device_count={'CPU': 4})) test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/function_argument_naming_test.py
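The naming rule these tests assert can be distilled: a named `TensorSpec` wins, and an unnamed one falls back to the Python argument name. A minimal sketch under that reading (the expected output is an inference from the tests, not a quoted result):

from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops

ops.enable_eager_execution()

@def_function.function
def fn(a, b):
  return a + b

fn(array_ops.ones([]), array_ops.ones([]))  # trace once, as the tests do
fn_op = fn.get_concrete_function(
    tensor_spec.TensorSpec([], dtypes.float32, name='left'),
    tensor_spec.TensorSpec([], dtypes.float32))
print([inp.op.name for inp in fn_op.inputs])  # expected: ['left', 'b']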
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Gradient tape utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.python import pywrap_tensorflow from tensorflow.python.util.lazy_loader import LazyLoader # There is a circular dependency between this, ops.py, and # distribution_strategy_context. # TODO(b/117329403): Remove this circular dependency. distribution_strategy_context = LazyLoader( "distribution_strategy_context", globals(), "tensorflow.python.distribute." "distribution_strategy_context") class Tape(object): """Represents a gradient propagation trace.""" def __init__(self, tape): self._tape = tape def watched_variables(self): return pywrap_tensorflow.TFE_Py_TapeWatchedVariables(self._tape) def push_new_tape(persistent=False, watch_accessed_variables=True): """Pushes a new tape onto the tape stack.""" tape = pywrap_tensorflow.TFE_Py_TapeSetNew(persistent, watch_accessed_variables) return Tape(tape) def push_tape(tape): """Pushes an existing tape onto the tape stack.""" pywrap_tensorflow.TFE_Py_TapeSetAdd(tape._tape) # pylint: disable=protected-access def watch(tape, tensor): """Marks this tensor to be watched by the given tape.""" pywrap_tensorflow.TFE_Py_TapeWatch(tape._tape, tensor) # pylint: disable=protected-access def watch_variable(tape, variable): """Marks this variable to be watched by the given tape.""" strategy, context = ( distribution_strategy_context.get_strategy_and_replica_context()) if context: variables = [strategy.extended.value_container(variable)] else: variables = strategy.experimental_local_results(variable) for var in variables: pywrap_tensorflow.TFE_Py_TapeWatchVariable(tape._tape, var) # pylint: disable=protected-access def variable_accessed(variable): """Notifies all tapes in the stack that a variable has been accessed. Args: variable: variable to be watched. """ strategy, context = ( distribution_strategy_context.get_strategy_and_replica_context()) if context: variables = [strategy.extended.value_container(variable)] else: variables = strategy.experimental_local_results(variable) for var in variables: pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var) def variables_accessed(variables): """Notifies all tapes in the stack that variables have been accessed. Only trainable variables are marked as accessed. Args: variables: iterable of variables to mark as accessed.
""" strategy, context = ( distribution_strategy_context.get_strategy_and_replica_context()) accessed = [] if context: accessed = [strategy.extended.value_container(variable) for variable in variables if variable.trainable] else: for variable in variables: if variable.trainable: accessed.extend(strategy.experimental_local_results(variable)) for var in accessed: pywrap_tensorflow.TFE_Py_TapeVariableAccessed(var) def pop_tape(tape): """Pops the given tape from the stack.""" pywrap_tensorflow.TFE_Py_TapeSetRemove(tape._tape) # pylint: disable=protected-access @contextlib.contextmanager def stop_recording(): is_stopped = pywrap_tensorflow.TFE_Py_TapeSetIsStopped() try: if not is_stopped: pywrap_tensorflow.TFE_Py_TapeSetStopOnThread() yield finally: if not is_stopped: pywrap_tensorflow.TFE_Py_TapeSetRestartOnThread() def should_record(tensors): """Returns True if any tape in the stack watches any of these tensors.""" return pywrap_tensorflow.TFE_Py_TapeSetShouldRecord(tensors) def record_operation(op_type, output_tensors, input_tensors, backward_function): """Records the operation on all tapes in the stack.""" pywrap_tensorflow.TFE_Py_TapeSetRecordOperation( op_type, output_tensors, input_tensors, backward_function) def delete_trace(tensor_id): """Deletes traces for this Tensor from all tapes in the stack.""" pywrap_tensorflow.TFE_Py_TapeSetDeleteTrace(tensor_id) def could_possibly_record(): """Returns True if any tape is active.""" return not pywrap_tensorflow.TFE_Py_TapeSetIsEmpty()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/tape.py
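These functions are normally driven by `backprop.GradientTape`, but the stack discipline can be seen directly. A minimal sketch using only the module's own entry points, assuming no other tapes are active on the thread:

from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

ops.enable_eager_execution()

x = constant_op.constant(3.0)
t = tape.push_new_tape()      # tape is now on the thread-local stack
tape.watch(t, x)              # mark x as watched by that tape
assert tape.should_record([x])
with tape.stop_recording():   # temporarily suspends all tapes on the thread
  assert not tape.should_record([x])
tape.pop_tape(t)              # remove the tape again
assert not tape.could_possibly_record()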
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import re import weakref from six.moves import range from tensorflow.python.autograph.core import converter from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import lift_to_graph from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.keras.engine import training from tensorflow.python.keras.layers import core from tensorflow.python.module import module from tensorflow.python.ops import array_ops from tensorflow.python.ops import cond_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam class _ModelWithOptimizer(training.Model): def __init__(self): super(_ModelWithOptimizer, self).__init__() self.dense = core.Dense(1) self.optimizer = adam.AdamOptimizer(0.01) @def_function.function( input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32), tensor_spec.TensorSpec([None], dtypes.float32))) def call(self, x, y): with backprop.GradientTape() as tape: loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.) trainable_variables = self.trainable_variables gradients = tape.gradient(loss, trainable_variables) self.optimizer.apply_gradients(zip(gradients, trainable_variables)) return {'loss': loss} class _HasDecoratedMethod(object): @def_function.function def f(self, x): return x * 3. 
class DefFunctionTest(test.TestCase): def testNoVariables(self): @def_function.function def fn(x): return 2 * x self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0) def testFailIfVariablesAreCreatedMoreThanOnce(self): @def_function.function def fn(x): return variables.Variable(1.0) + x with self.assertRaises(ValueError): fn(1.0) def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self): state = [] @def_function.function def fn(x): state.append(variables.Variable(1.0)) return state[-1] + x with self.assertRaises(ValueError): fn(1.0) def testRange(self): @def_function.function def f(unused_x): return 1.0 self.assertAllEqual(f(range(5)), 1.0) def testCorrectVariableCreation(self): state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(2.0)) return state[0] * x self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0) self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0) def testFunctionInitializer(self): state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(lambda: 2.0)) return state[0] * x self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0) def testFunctionInitializationFunction(self): state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(2.0)) return state[0] * x init_fn = fn.get_initialization_function(constant_op.constant(1.0)) self.assertEqual(len(state), 1) self.assertFalse( resource_variable_ops.var_is_initialized_op(state[0].handle)) init_fn() self.assertEqual(state[0].numpy(), 2.0) def testVariableInitializerNotConstant(self): state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(2.0 * x)) return state[0] * x self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0) self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0) def testLegacyGraphModeVariables(self): with ops.Graph().as_default(), self.test_session() as sess: state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(2.0)) return state[0] * x result = fn(3.0) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(sess.run(state[0]), 2.0) self.assertAllEqual(self.evaluate(result), 6.0) def testLegacyGraphModeVariablesNonTrivialInitializer(self): with ops.Graph().as_default(), self.test_session() as sess: state = [] @def_function.function def fn(x): if not state: two = constant_op.constant(2.0) four = two * two two_again = math_ops.sqrt(four) state.append(variables.Variable(two_again + four)) return state[0] * x result = fn(3.0) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(sess.run(state[0]), 6.0) self.assertAllEqual(self.evaluate(result), 18.0) def testLegacyGraphModeInputDependentInitializerFails(self): with ops.Graph().as_default(): state = [] @def_function.function def fn(x): if not state: state.append(variables.Variable(2.0 * x)) return state[0] * x with self.assertRaisesRegexp( lift_to_graph.UnliftableError, r'transitively.* mul .* x'): fn(constant_op.constant(3.0)) def testMethod(self): class MyModel(object): def __init__(self): self.var = None @def_function.function def apply(self, x): if self.var is None: self.var = variables.Variable(2.0) return self.var * x m0 = MyModel() self.assertAllEqual(m0.apply(3.0), 6.0) # Calling twice to exercise that we do not recreate variables. 
m0.var.assign(3.0) self.assertAllEqual(m0.apply(3.0), 9.0) m1 = MyModel() self.assertAllEqual(m1.apply(3.0), 6.0) def test_functools_partial(self): self.assertAllClose( 3., def_function.function(functools.partial(lambda x, y: x + y, 1.))( constant_op.constant(2.))) def test_functools_partial_new_default(self): def f(x=3, y=7): return x + y func = def_function.function(functools.partial(f, y=6)) self.assertEqual(func().numpy(), 9) self.assertEqual(func(y=8).numpy(), 11) def test_functools_partial_keywords(self): def f(x, y): return x + y func = def_function.function( functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1]))) self.assertAllEqual(func(), [0.0]) def test_functools_partial_single_positional(self): def f(x, y): return x + y func = def_function.function( functools.partial(f, constant_op.constant(1))) self.assertAllEqual(func(5), 6) def test_complicated_partial_with_defaults(self): def identity(*args): return args def dynamic_unroll(core_fn, input_sequence, initial_state, sequence_length=None, parallel_iterations=1, swap_memory=False): del core_fn self.assertIs(None, sequence_length) self.assertEqual(1, parallel_iterations) self.assertTrue(swap_memory) return input_sequence, initial_state input_sequence = random_ops.random_uniform([1, 1, 1]) initial_state = random_ops.random_uniform([1, 1]) func = def_function.function( functools.partial(dynamic_unroll, identity, swap_memory=True)) func(input_sequence, initial_state) def test_unspecified_default_argument(self): wrapped = def_function.function( lambda x, y=2: x + y, input_signature=[tensor_spec.TensorSpec((), dtypes.int32)]) self.assertEqual(3, wrapped(constant_op.constant(1)).numpy()) def test_optimizer(self): x = constant_op.constant([[3., 4.]]) y = constant_op.constant([2.]) model = _ModelWithOptimizer() model(x, y) def test_concrete_function_from_signature(self): @def_function.function( input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]) def compute(x): return 2. 
* x concrete = compute.get_concrete_function() self.assertAllClose(1., concrete(constant_op.constant(0.5))) concrete = compute.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32)) self.assertAllClose(4., concrete(constant_op.constant(2.))) signature_args, _ = concrete.structured_input_signature self.assertEqual(signature_args, (tensor_spec.TensorSpec( None, dtypes.float32, name='x'),)) @test_util.run_in_graph_and_eager_modes def test_variable_naming(self): class HasVars(module.Module): def __init__(self): self.x = None self.y = None self.z = None @def_function.function def make_x(self): if self.x is None: self.x = variables.Variable(1., name='v') def make_y(self): if self.y is None: self.y = variables.Variable(1., name='v') def make_z(self): if self.z is None: with ops.name_scope('z_scope'): self.z = variables.Variable(1., name='z') root = HasVars() root.make_x() root.make_y() root.make_z() self.assertEqual('v:0', root.x.name) self.assertEqual('z_scope/z:0', root.z.name) def test_concrete_function_keyword_arguments(self): @def_function.function def f(x): return x conc = f.get_concrete_function( tensor_spec.TensorSpec(None, dtypes.float32, 'y')) conc(y=constant_op.constant(3.0)) signature_args, _ = conc.structured_input_signature self.assertEqual('y', signature_args[0].name) conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32)) conc(x=constant_op.constant(3.0)) signature_args, _ = conc.structured_input_signature self.assertEqual('x', signature_args[0].name) @def_function.function def g(x): return x[0] conc = g.get_concrete_function( [tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2]) conc(z=constant_op.constant(3.0)) signature_args, _ = conc.structured_input_signature self.assertEqual('z', signature_args[0][0].name) with self.assertRaisesRegexp( ValueError, 'either zero or all names have to be specified'): conc = g.get_concrete_function([ tensor_spec.TensorSpec(None, dtypes.float32, 'z'), tensor_spec.TensorSpec(None, dtypes.float32), ]) def test_error_inner_capture(self): @def_function.function def f(inputs): num_steps, _ = inputs.shape[:2] outputs = [] for t in math_ops.range(num_steps): outputs.append(inputs[t]) return outputs with self.assertRaisesRegexp(errors.InaccessibleTensorError, 'defined in another function or code block'): f(array_ops.zeros(shape=(8, 42, 3))) def testRuntimeErrorNotSticky(self): @def_function.function def fail(i): control_flow_ops.Assert(math_ops.equal(i, 0), ['ick']) fail(constant_op.constant(0)) # OK with self.assertRaises(errors.InvalidArgumentError): fail(constant_op.constant(1)) # InvalidArgument: "ick" fail(constant_op.constant(0)) # OK def testUnderscoreName(self): @def_function.function def f(_): return _ + _ self.assertAllEqual(2.0, f(constant_op.constant(1.0))) def test_serialization_signature_cache(self): @def_function.function def f(x, y): return x, y f(constant_op.constant([[3., 4.]]), constant_op.constant([2.])) f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2])) signatures_args = set() concrete_functions = f._list_all_concrete_functions_for_serialization() for concrete_function in concrete_functions: args, kwargs = concrete_function.structured_input_signature signatures_args.add(args) self.assertEqual(dict(), kwargs) self.assertEqual( signatures_args, set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'), tensor_spec.TensorSpec([1], dtypes.float32, name='y')), (tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'), tensor_spec.TensorSpec([1], dtypes.int32, name='y'))))) 
@test_util.assert_no_garbage_created def testFunctionReferenceCycles(self): fn = def_function.function(lambda x: 2. * x) fn(constant_op.constant(4.0)) weak_fn = weakref.ref(fn) del fn # Tests that the weak reference we made to the function is now dead, which # means the object has been deleted. This should be true as long as the # function itself is not involved in a reference cycle. self.assertIs(None, weak_fn()) @test_util.assert_no_garbage_created def testMethodReferenceCycles(self): has_decorated_method = _HasDecoratedMethod() has_decorated_method.f(constant_op.constant(5.)) weak_fn = weakref.ref(has_decorated_method.f) del has_decorated_method # Tests that the weak reference we made to the function is now dead, which # means the object has been deleted. This should be true as long as the # function itself is not involved in a reference cycle. self.assertIs(None, weak_fn()) @test_util.assert_no_new_pyobjects_executing_eagerly def testErrorMessageWhenGraphTensorIsPassedToEager(self): @def_function.function def failing_function(): a = constant_op.constant(1.) with ops.init_scope(): _ = a + a with self.assertRaisesRegexp( TypeError, re.compile('An op outside of the function.*passed.*Const', re.DOTALL)): failing_function() def testNonUniqueNamesGetConcreteFunction(self): @def_function.function def non_unique_arg_names(x, **kwargs): a, b, c = x d = kwargs['d'] return a + b + c + d concrete = non_unique_arg_names.get_concrete_function( (tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32), tensor_spec.TensorSpec(None, dtypes.float32)), d=tensor_spec.TensorSpec(None, dtypes.float32)) self.assertAllClose( 10., concrete(x=constant_op.constant(1.), x_1=constant_op.constant(2.), x_2=constant_op.constant(3.), d=constant_op.constant(4.))) self.assertAllClose( 10., concrete(constant_op.constant(1.), constant_op.constant(2.), constant_op.constant(3.), constant_op.constant(4.))) def testVariableCreatorScope(self): created_variables = [] captured_variables = [] @def_function.function def f(): if not created_variables: created_variables.append(variables.Variable(1.)) return created_variables[0] + 1. def capture_creator(next_creator, **kwargs): created = next_creator(**kwargs) captured_variables.append(created) return created with variable_scope.variable_creator_scope(capture_creator): f() self.assertEqual(created_variables, captured_variables) def testVarAlreadyInitializedNoClobbering(self): v_holder = [] @def_function.function def add_var(x): if not v_holder: v = variables.Variable([1., 2.]) v_holder.append(v) already_initialized = variables.Variable(3.) with ops.init_scope(): already_initialized.assign(10.) v_holder.append(already_initialized) return v_holder[0] + v_holder[1] + x add_var.get_concrete_function(constant_op.constant(2.)) self.assertAllClose([13., 14.], add_var(constant_op.constant(2.))) def testSameVariableTwice(self): v = variables.Variable(1.0) @def_function.function def add(a, b): return a + b self.assertAllEqual(add(v, v), 2.0) def testShapeCache(self): @def_function.function def func(x): return 2 * x func_a = func.get_concrete_function( tensor_spec.TensorSpec([None], dtypes.int32)) func_b = func.get_concrete_function( tensor_spec.TensorSpec([None], dtypes.int32)) self.assertIs(func_a, func_b) def testInitializationInNestedCall(self): v_holder = [] @def_function.function def add_var(x): if not v_holder: v = variables.Variable([1., 2.]) v_holder.append(v) already_initialized = variables.Variable(3.) 
with ops.init_scope(): already_initialized.assign(10.) v_holder.append(already_initialized) return v_holder[0] + v_holder[1] + x @def_function.function def wrapper(x): return add_var(x) self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.))) v_holder[1].assign(11.) self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.))) # TODO(b/137148281): reenable @test_util.run_gpu_only def testDeviceAnnotationRespected(self): a = [] @def_function.function() def create_variable(): with ops.init_scope(): initial_value = random_ops.random_uniform( (2, 2), maxval=1000000, dtype=dtypes.int64) if not a: with ops.device('CPU:0'): a.append(resource_variable_ops.ResourceVariable(initial_value)) return a[0].read_value() created_variable_read = create_variable() self.assertRegexpMatches(a[0].device, 'CPU') def testDecorate(self): func = def_function.function(lambda: 1) def decorator(f): return lambda: 1 + f() func._decorate(decorator) self.assertEqual(func().numpy(), 2) def testLiftPlaceholderInitializedVariable(self): with ops.Graph().as_default(): var_list = [] @def_function.function def use_variable(): if not var_list: initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32) v = variables.Variable(initial_value) var_list.append(v) return var_list[0] + 1. var_plus_one = use_variable() with self.session() as session: init_op = var_list[0].initializer session.run(init_op, feed_dict={init_op.inputs[1]: 2.}) self.assertEqual(3., session.run(var_plus_one)) def testDecorate_rejectedAfterTrace(self): func = def_function.function(lambda: 1) self.assertEqual(func().numpy(), 1) msg = 'Functions cannot be decorated after they have been traced.' with self.assertRaisesRegexp(ValueError, msg): func._decorate(lambda f: f) def test_recursive_tf_function(self): @def_function.function def recursive_fn(n): if n > 0: return recursive_fn(n - 1) return 1 self.assertEqual(recursive_fn(5).numpy(), 1) def test_recursive_tf_function_with_gradients(self): @def_function.function def recursive_fn(n, x): if n > 0: return n * recursive_fn(n - 1, x) else: return x x = variables.Variable(1.0) with backprop.GradientTape() as tape: g = recursive_fn(5, x) dg_dx = tape.gradient(g, x) self.assertEqual(dg_dx.numpy(), 120) def test_recursive_python_function(self): def recursive_py_fn(n): if n > 0: return recursive_py_fn(n - 1) return 1 @def_function.function def recursive_fn(n): return recursive_py_fn(n) self.assertEqual(recursive_fn(5).numpy(), 1) def test_recursive_python_function_with_gradients(self): def recursive_py_fn(n, x): if n > 0: return n * recursive_py_fn(n - 1, x) return x @def_function.function def recursive_fn(n, x): return recursive_py_fn(n, x) x = variables.Variable(1.0) with backprop.GradientTape() as tape: g = recursive_fn(5, x) dg_dx = tape.gradient(g, x) self.assertEqual(dg_dx.numpy(), 120) def test_recursive_tf_function_call_each_other(self): @def_function.function def recursive_fn1(n): if n <= 1: return 1 return recursive_fn2(n - 1) @def_function.function def recursive_fn2(n): if n <= 1: return 2 return recursive_fn1(n - 1) self.assertEqual(recursive_fn1(5).numpy(), 1) self.assertEqual(recursive_fn1(6).numpy(), 2) self.assertEqual(recursive_fn2(5).numpy(), 2) self.assertEqual(recursive_fn2(6).numpy(), 1) def test_recursive_tf_function_call_each_other_with_gradients(self): @def_function.function def recursive_fn1(n, x): if n <= 1: return x return n * recursive_fn2(n - 1, x) @def_function.function def recursive_fn2(n, x): if n <= 1: return 2 * x return n * recursive_fn1(n - 1, x) x = 
variables.Variable(1.0) with backprop.GradientTape() as tape: g1 = recursive_fn1(5, x) dg1_dx = tape.gradient(g1, x) self.assertEqual(dg1_dx.numpy(), 120) with backprop.GradientTape() as tape: g2 = recursive_fn2(5, x) dg2_dx = tape.gradient(g2, x) self.assertEqual(dg2_dx.numpy(), 240) def test_recursive_tf_function_with_cond(self): @def_function.function(autograph=False) def recursive_fn(n): return cond_v2.cond_v2(n > 0, recursive_fn(n - 1), 1) with self.assertRaises(RecursionError): recursive_fn(constant_op.constant(5)) if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/def_function_test.py
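Several of the tests above lean on the same idiom: a `def_function.function` may create its variables only during the first trace, so state is stashed outside the function body. In isolation, following `testCorrectVariableCreation`:

from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables

ops.enable_eager_execution()

state = []

@def_function.function
def scale(x):
  if not state:  # the branch is taken only while tracing the first call
    state.append(variables.Variable(2.0))
  return state[0] * x

print(scale(constant_op.constant(3.0)).numpy())  # 6.0
print(scale(constant_op.constant(5.0)).numpy())  # 10.0, same variable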
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions called by the generated code to execute an eager-mode op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from google.protobuf import text_format from tensorflow.core.framework import tensor_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.util import compat def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None): """Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). name: Customized name for the operation. Returns: List of output Tensor objects. The list is empty if there are no outputs Raises: An exception on error. """ device_name = ctx.device_name # pylint: disable=protected-access try: ctx.ensure_initialized() tensors = pywrap_tensorflow.TFE_Py_Execute(ctx._handle, device_name, op_name, inputs, attrs, num_outputs) except core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message six.raise_from(core._status_to_exception(e.code, message), None) except TypeError as e: keras_symbolic_tensors = [ x for x in inputs if ops._is_keras_symbolic_tensor(x) ] if keras_symbolic_tensors: raise core._SymbolicException( "Inputs to eager execution function cannot be Keras symbolic " "tensors, but found {}".format(keras_symbolic_tensors)) raise e # pylint: enable=protected-access return tensors def execute_with_cancellation(op_name, num_outputs, inputs, attrs, ctx, cancellation_manager, name=None): """Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). cancellation_manager: a `CancellationManager` object that can be used to cancel the operation. name: Customized name for the operation. Returns: List of output Tensor objects. 
The list is empty if there are no outputs Raises: An exception on error. """ device_name = ctx.device_name # pylint: disable=protected-access try: ctx.ensure_initialized() tensors = pywrap_tensorflow.TFE_Py_ExecuteCancelable( ctx._handle, device_name, op_name, inputs, attrs, cancellation_manager._impl, num_outputs) except core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message six.raise_from(core._status_to_exception(e.code, message), None) except TypeError as e: keras_symbolic_tensors = [ x for x in inputs if ops._is_keras_symbolic_tensor(x) ] if keras_symbolic_tensors: raise core._SymbolicException( "Inputs to eager execution function cannot be Keras symbolic " "tensors, but found {}".format(keras_symbolic_tensors)) raise e # pylint: enable=protected-access return tensors def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None): """Monkey-patch to execute to enable execution callbacks.""" tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name) for callback in ctx.post_execution_callbacks: callback(op_name, tuple(inputs), attrs, tensors, name) return tensors execute = quick_execute def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_results, unused_name): """Import backprop if you want gradients recorded.""" pass def make_float(v, arg_name): if not isinstance(v, compat.real_types): raise TypeError("Expected float for argument '%s' not %s." % (arg_name, repr(v))) return float(v) def make_int(v, arg_name): if isinstance(v, six.string_types): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) try: return int(v) except (ValueError, TypeError): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) def make_str(v, arg_name): if not isinstance(v, compat.bytes_or_text_types): raise TypeError("Expected string for argument '%s' not %s." % (arg_name, repr(v))) return compat.as_bytes(v) # Convert unicode strings to bytes. def make_bool(v, arg_name): if not isinstance(v, bool): raise TypeError("Expected bool for argument '%s' not %s." % (arg_name, repr(v))) return v def make_type(v, arg_name): try: v = dtypes.as_dtype(v).base_dtype except TypeError: raise TypeError("Expected DataType for argument '%s' not %s." % (arg_name, repr(v))) i = v.as_datatype_enum return i def make_shape(v, arg_name): """Convert v into a list.""" # Args: # v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape. # arg_name: String, for error messages. # Returns: # None if the rank is unknown, otherwise a list of ints (or Nones in the # position where the dimension is unknown). try: shape = tensor_shape.as_shape(v) except TypeError as e: raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e)) except ValueError as e: raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name, e)) if shape.ndims is None: return None else: return shape.as_list() def make_tensor(v, arg_name): """Ensure v is a TensorProto.""" if isinstance(v, tensor_pb2.TensorProto): return v elif isinstance(v, six.string_types): pb = tensor_pb2.TensorProto() text_format.Merge(v, pb) return pb raise TypeError( "Don't know how to convert %s to a TensorProto for argument '%s'." 
% (repr(v), arg_name)) def args_to_matching_eager(l, ctx, default_dtype=None): """Convert sequence `l` to eager same-type Tensors.""" EagerTensor = ops.EagerTensor # pylint: disable=invalid-name for x in l: if not isinstance(x, EagerTensor): break else: # note: intentional for-else return l[0]._datatype_enum(), l # pylint: disable=protected-access # TODO(josh11b): Could we do a better job if we also passed in the # allowed dtypes when that was known? # Is some input already a Tensor with a dtype? dtype = None for t in l: if isinstance(t, EagerTensor): dtype = t.dtype break internal_convert_to_tensor = ops.internal_convert_to_tensor if dtype is None: # Infer a dtype based on the first value, and use that dtype for the # remaining values. ret = [] for t in l: ret.append( internal_convert_to_tensor( t, dtype, preferred_dtype=default_dtype, ctx=ctx)) if dtype is None: dtype = ret[-1].dtype else: ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l] # TODO(slebedev): consider removing this as it leaks a Keras concept. # pylint: disable=protected-access keras_symbolic_tensors = [x for x in ret if ops._is_keras_symbolic_tensor(x)] if keras_symbolic_tensors: raise core._SymbolicException( "Using symbolic output of a Keras layer during eager execution " "{}".format(keras_symbolic_tensors)) # pylint: enable=protected-access return dtype.as_datatype_enum, ret def convert_to_mixed_eager_tensors(values, ctx): v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values] types = [t._datatype_enum() for t in v] # pylint: disable=protected-access return types, v def args_to_mixed_eager_tensors(lists, ctx): """Converts a list of same-length lists of values to eager tensors.""" assert len(lists) > 1 # Generate an error if len(lists[i]) is not the same for all i. lists_ret = [] for l in lists[1:]: if len(l) != len(lists[0]): raise ValueError( "Expected list arguments to be the same length: %d != %d (%r vs. %r)." % (len(lists[0]), len(l), lists[0], l)) lists_ret.append([]) # Convert the first element of each list first, then the second element, etc. types = [] for i in range(len(lists[0])): dtype = None # If any list has a Tensor, use that dtype for l in lists: if isinstance(l[i], ops.EagerTensor): dtype = l[i].dtype break if dtype is None: # Convert the first one and use its dtype. lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx)) dtype = lists_ret[0][i].dtype for j in range(1, len(lists)): lists_ret[j].append( ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx)) else: # Convert everything to the found dtype. for j in range(len(lists)): lists_ret[j].append( ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx)) types.append(dtype.as_datatype_enum) return types, lists_ret
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/execute.py
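A minimal usage sketch for the attribute-coercion helpers in the execute.py row above; this is an editorial illustration rather than a dataset row, and it assumes the r1.15 source tree shown here is importable.

from tensorflow.python.eager import execute
from tensorflow.python.framework import dtypes

# Each make_* helper coerces a Python value into the form expected for an
# op attribute, raising TypeError on a mismatch.
print(execute.make_float(3, "x"))    # 3.0 -- ints are acceptable reals
print(execute.make_int(3.9, "n"))    # 3 -- coerced via int()
print(execute.make_str(u"hi", "s"))  # b'hi' -- unicode becomes bytes
print(execute.make_type("float32", "T") ==
      dtypes.float32.as_datatype_enum)            # True -- enum form of dtype
print(execute.make_shape([1, None, 3], "shape"))  # [1, None, 3]
print(execute.make_shape(None, "shape"))          # None -- unknown rank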
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph-only versions of a few op functions, for internal use only."""

# Must be separate from array_ops to avoid a cyclic dependency.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape


def graph_zeros_like(tensor):
  """Graph-only version of tf.zeros_like(), for internal use only."""
  g = ops._get_graph_from_inputs([tensor])  # pylint: disable=protected-access
  with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    dtype = tensor.dtype.base_dtype
    dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
    op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
                     attrs={"T": dtype_value}, name=name)
  result, = op.outputs
  return result


def graph_placeholder(dtype, shape, name=None):
  """Graph-only version of tf.compat.v1.placeholder(), for internal use only."""
  dtype = dtype.base_dtype
  dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
  if isinstance(shape, (list, tuple)):
    shape = tensor_shape.TensorShape(shape)
  shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
  g = ops.get_default_graph()
  with ops.name_scope(name, "placeholder", []) as name:
    op = g.create_op("Placeholder", [], [dtype], input_types=[],
                     attrs={"dtype": dtype_value, "shape": shape}, name=name)
  result, = op.outputs
  return result
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/graph_only_ops.py
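A short sketch (editorial, not part of the dataset) exercising the two graph-only helpers from graph_only_ops.py above inside an explicit Graph; it assumes the same r1.15 tree is importable.

from tensorflow.python.eager import graph_only_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops

g = ops.Graph()
with g.as_default():
  # Build a Placeholder and a ZerosLike node without touching array_ops.
  x = graph_only_ops.graph_placeholder(dtypes.float32, shape=[2, 2])
  z = graph_only_ops.graph_zeros_like(x)
print(x.op.type, z.op.type)  # Placeholder ZerosLike
print(z.shape)               # (2, 2) -- shape inference carries over from x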
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for TensorFlow "Eager" Mode's Tensor class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import re import sys import unittest import numpy as np import six from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import io_ops from tensorflow.python.ops import variables def _create_tensor(value, device=None, dtype=None): context.ensure_initialized() ctx = context.context() if device is None: device = ctx.device_name if dtype is not None: dtype = dtype.as_datatype_enum try: return ops.EagerTensor(value, device=device, dtype=dtype) except core._NotOkStatusException as e: # pylint: disable=protected-access raise core._status_to_exception(e.code, e.message) class TFETensorTest(test_util.TensorFlowTestCase): def testScalarTensor(self): t = _create_tensor(3, dtype=dtypes.int32) self.assertAllEqual(t, _create_tensor(np.array(3))) self.assertEqual(dtypes.int32, t.dtype) self.assertEqual(0, t.shape.ndims) self.assertAllEqual([], t.shape.as_list()) self.assertIn("tf.Tensor", str(t)) self.assertIn("tf.Tensor", repr(t)) def testBadConstructorArgs(self): context.ensure_initialized() ctx = context.context() device = ctx.device_name # Missing device. with self.assertRaisesRegexp(TypeError, r".*argument 'device' \(pos 2\).*"): ops.EagerTensor(1) # Bad dtype type. with self.assertRaisesRegexp(TypeError, "Expecting a DataType value for dtype. Got"): ops.EagerTensor(1, device=device, dtype="1") # Following errors happen when trying to copy to GPU. if not test_util.is_gpu_available(): self.skipTest("No GPUs found") with ops.device("/device:GPU:0"): # Bad device. 
with self.assertRaisesRegexp(TypeError, "Error parsing device argument"): ops.EagerTensor(1.0, device=1) def testNumpyValue(self): values = np.array([3.0]) t = _create_tensor(values) self.assertAllEqual(values, t) @test_util.assert_no_new_pyobjects_executing_eagerly def testNumpyDtypeSurvivesThroughTensorConversion(self): scalar_creators = [np.int32, np.int64, np.float32, np.float64] conversion_functions = [ops.convert_to_tensor, constant_op.constant] for scalar_creator in scalar_creators: for conversion_function in conversion_functions: np_val = scalar_creator(3) tensor_val = conversion_function(np_val) self.assertEqual(tensor_val.numpy().dtype, np_val.dtype) self.assertEqual(tensor_val.numpy(), np_val) def testNumpyValueWithCast(self): values = np.array([3.0], dtype=np.float32) t = _create_tensor(values, dtype=dtypes.float64) self.assertAllEqual(values, t) ctx = context.context() # Bad dtype value. with self.assertRaisesRegexp(TypeError, "Invalid dtype argument value"): ops.EagerTensor(values, device=ctx.device_name, dtype=12345) def testNumpyOrderHandling(self): n = np.array([[1, 2], [3, 4]], order="F") t = _create_tensor(n) self.assertAllEqual([[1, 2], [3, 4]], t) def testNumpyArrayDtype(self): tensor = constant_op.constant([1.0, 2.0, 3.0]) numpy_tensor = np.asarray(tensor, dtype=np.int32) self.assertAllEqual(numpy_tensor, [1, 2, 3]) def testNdimsAgreesWithNumpy(self): numpy_tensor = np.asarray(1.0) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([1.0, 2.0, 3.0]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(numpy_tensor.ndim, tensor.ndim) def testLenAgreesWithNumpy(self): numpy_tensor = np.asarray(1.0) tensor = constant_op.constant(numpy_tensor) with self.assertRaises(TypeError): len(numpy_tensor) with self.assertRaisesRegexp( TypeError, r"Scalar tensor has no `len[(][)]`"): len(tensor) numpy_tensor = np.asarray([1.0, 2.0, 3.0]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(len(numpy_tensor), len(tensor)) numpy_tensor = np.asarray([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) tensor = constant_op.constant(numpy_tensor) self.assertAllEqual(len(numpy_tensor), len(tensor)) def testCopy(self): t = constant_op.constant(1.0) tt = copy.copy(t) self.assertAllEqual(tt, 1.0) del tt tt = copy.deepcopy(t) self.assertAllEqual(tt, 1.0) del tt self.assertAllEqual(t, 1.0) def testConstantDtype(self): self.assertEqual( constant_op.constant(1, dtype=np.int64).dtype, dtypes.int64) def testTensorAndNumpyMatrix(self): expected = np.array([[1.0, 2.0], [3.0, 4.0]], np.float32) actual = _create_tensor([[1.0, 2.0], [3.0, 4.0]]) self.assertAllEqual(expected, actual) self.assertEqual(np.float32, actual.dtype) self.assertEqual(dtypes.float32, actual.dtype) self.assertAllEqual([2, 2], actual.shape.as_list()) def testFloatDowncast(self): # Unless explicitly specified, float64->float32 t = _create_tensor(3.0) self.assertEqual(dtypes.float32, t.dtype) t = _create_tensor(3.0, dtype=dtypes.float64) self.assertEqual(dtypes.float64, t.dtype) def testBool(self): self.assertFalse(bool(_create_tensor(False))) self.assertFalse(bool(_create_tensor([False]))) self.assertFalse(bool(_create_tensor([[False]]))) self.assertFalse(bool(_create_tensor([0]))) self.assertFalse(bool(_create_tensor([0.]))) self.assertTrue(bool(_create_tensor([1]))) 
self.assertTrue(bool(_create_tensor([1.]))) @unittest.skipUnless(six.PY2, "long has been removed in PY3") def testLong(self): self.assertEqual(long(_create_tensor(long(42))), 42) def testIndex(self): self.assertEqual([42][_create_tensor(0)], 42) with self.assertRaises(TypeError): _ = [42][_create_tensor([0])] def testIntDowncast(self): t = _create_tensor(3) self.assertEqual(dtypes.int32, t.dtype) t = _create_tensor(3, dtype=dtypes.int64) self.assertEqual(dtypes.int64, t.dtype) t = _create_tensor(2**33) self.assertEqual(dtypes.int64, t.dtype) def testTensorCreationFailure(self): with self.assertRaises(ValueError): # Should fail because the each row of the Python object has a different # number of columns. self.assertEqual(None, _create_tensor([[1], [1, 2]])) def testMultiLineTensorStr(self): t = _create_tensor(np.eye(3)) tensor_str = str(t) self.assertIn("shape=%s, dtype=%s" % (t.shape, t.dtype.name), tensor_str) self.assertIn(str(t), tensor_str) def testMultiLineTensorRepr(self): t = _create_tensor(np.eye(3)) tensor_repr = repr(t) self.assertTrue(tensor_repr.startswith("<")) self.assertTrue(tensor_repr.endswith(">")) self.assertIn("id=%d, shape=%s, dtype=%s, numpy=\n%r" % (t._id, t.shape, t.dtype.name, t.numpy()), tensor_repr) def testTensorStrReprObeyNumpyPrintOptions(self): orig_threshold = np.get_printoptions()["threshold"] orig_edgeitems = np.get_printoptions()["edgeitems"] np.set_printoptions(threshold=2, edgeitems=1) t = _create_tensor(np.arange(10, dtype=np.int32)) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", str(t))) self.assertTrue(re.match(r".*\[.*0.*\.\.\..*9.*\]", repr(t))) # Clean up: reset to previous printoptions. np.set_printoptions(threshold=orig_threshold, edgeitems=orig_edgeitems) def testZeroDimTensorStr(self): t = _create_tensor(42) self.assertIn("42, shape=(), dtype=int32", str(t)) def testZeroDimTensorRepr(self): t = _create_tensor(42) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(), dtype=int32, numpy=42" % t._id, repr(t)) def testZeroSizeTensorStr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertIn("[], shape=(0,), dtype=float32", str(t)) def testZeroSizeTensorRepr(self): t = _create_tensor(np.zeros(0, dtype=np.float32)) self.assertTrue(repr(t).startswith("<")) self.assertTrue(repr(t).endswith(">")) self.assertIn("id=%d, shape=(0,), dtype=float32, numpy=%r" % (t._id, t.numpy()), repr(t)) def testStringTensor(self): t_np_orig = np.array([[b"a", b"ab"], [b"abc", b"abcd"]]) t = _create_tensor(t_np_orig) t_np = t.numpy() self.assertTrue(np.all(t_np == t_np_orig), "%s vs %s" % (t_np, t_np_orig)) def testIterateOverTensor(self): l = [[1, 2], [3, 4]] t = _create_tensor(l) for list_element, tensor_element in zip(l, t): self.assertAllEqual(list_element, tensor_element.numpy()) @test_util.run_gpu_only def testStringTensorOnGPU(self): with ops.device("/device:GPU:0"): with self.assertRaisesRegexp( RuntimeError, "Can't copy Tensor with type string to device"): _create_tensor("test string") def testInvalidUTF8ProducesReasonableError(self): if sys.version_info[0] < 3: self.skipTest("Test is only valid in python3.") with self.assertRaises(UnicodeDecodeError): io_ops.read_file(b"\xff") @test_util.run_in_graph_and_eager_modes def testConvertToTensorPreferredDtypeIsRespected(self): self.assertEqual( ops.convert_to_tensor(0.5, preferred_dtype=dtypes.int32).dtype, dtypes.float32) self.assertEqual( ops.convert_to_tensor(0.5, preferred_dtype=dtypes.float64).dtype, dtypes.float64) 
@test_util.run_in_graph_and_eager_modes def testCompatibility(self): integer_types = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] # Floats are not compatible with ints for t in integer_types: with self.assertRaises(TypeError): constant_op.constant(0.5, dtype=t) # Ints compatible with floats self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float16)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float32)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.float64)), 5.0) self.assertEqual( self.evaluate(constant_op.constant(5, dtype=dtypes.bfloat16)), 5.0) # Ints and floats are compatible with complex types self.assertEqual( constant_op.constant([[1.0]], dtype=dtypes.complex128).dtype, dtypes.complex128) self.assertEqual( constant_op.constant([[1]], dtype=dtypes.complex128).dtype, dtypes.complex128) # Quantized types are not compatible with floats quantized_types = [dtypes.qint16, dtypes.qint32, dtypes.qint8, dtypes.quint16, dtypes.quint8] for t in quantized_types: with self.assertRaises(TypeError): constant_op.constant(0.5, dtype=t) # TODO(b/118402529): quantized types are broken in eager. @test_util.run_in_graph_and_eager_modes def testCConvertToTensor(self): with self.assertRaises(TypeError): _ = constant_op.constant(0) < 0.5 @test_util.run_in_graph_and_eager_modes def testConvertToTensorAllowsOverflow(self): _ = ops.convert_to_tensor(123456789, dtype=dtypes.uint8) @test_util.assert_no_new_pyobjects_executing_eagerly @test_util.run_in_graph_and_eager_modes def testConvertToTensorNumpyZeroDim(self): for np_type, dtype in [(np.int32, dtypes.int32), (np.half, dtypes.half), (np.float32, dtypes.float32)]: x = ops.convert_to_tensor([np.array(65, dtype=np_type), np.array(16, dtype=np_type)]) self.assertEqual(x.dtype, dtype) self.assertAllEqual(x, [65, 16]) @test_util.assert_no_new_pyobjects_executing_eagerly @test_util.run_in_graph_and_eager_modes def testConvertToTensorNumpyScalar(self): x = ops.convert_to_tensor( [np.array(321, dtype=np.int).item(), np.array(16, dtype=np.int).item()]) self.assertAllEqual(x, [321, 16]) def testEagerTensorError(self): with self.assertRaisesRegexp( TypeError, "Cannot convert .* to EagerTensor of dtype .*"): _ = ops.convert_to_tensor(1., dtype=dtypes.int32) def testEagerLargeConstant(self): for t in [dtypes.uint64, dtypes.uint32, dtypes.int32, dtypes.int64]: self.assertEqual( constant_op.constant(t.max, dtype=t).numpy(), t.max) self.assertEqual( constant_op.constant(t.min, dtype=t).numpy(), t.min) def test_numpyIsView(self): t = constant_op.constant([0.0]) t._numpy()[0] = 42.0 self.assertAllClose(t, constant_op.constant([42.0])) def test_numpyFailsForResource(self): v = variables.Variable(42) with self.assertRaisesRegex(ValueError, "Cannot convert .+ resource"): v._handle._numpy() def testMemoryviewFailsForResource(self): v = variables.Variable(42) with self.assertRaisesRegex(BufferError, "Cannot convert .+ resource"): np.asarray(memoryview(v._handle)) def testMemoryviewIsReadonly(self): t = constant_op.constant([0.0]) self.assertTrue(memoryview(t).readonly) @test_util.assert_no_new_pyobjects_executing_eagerly def testMemoryviewScalar(self): t = constant_op.constant(42.0) self.assertAllEqual( np.array(memoryview(t)), np.array(42.0, dtype=np.float32)) @test_util.assert_no_new_pyobjects_executing_eagerly def testMemoryviewEmpty(self): t = constant_op.constant([], dtype=np.float32) self.assertAllEqual(np.array(memoryview(t)), 
np.array([])) @test_util.run_gpu_only @test_util.assert_no_new_pyobjects_executing_eagerly def testMemoryviewCopyToCPU(self): with ops.device("/device:GPU:0"): t = constant_op.constant([0.0]) self.assertAllEqual( np.array(memoryview(t)), np.array([0.0], dtype=np.float32)) class TFETensorUtilTest(test_util.TensorFlowTestCase): def testListOfThree(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([[1, 2, 5], [3, 4, 5]], dtype=dtypes.int32) t3 = _create_tensor([[1], [3], [5], [6]], dtype=dtypes.int32) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 0) self.assertAllEqual(np.array([3, 2, 4]), r.numpy()) r = pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2, t3], 1) self.assertAllEqual(np.array([2, 3, 1]), r.numpy()) def testEmptyTensorList(self): a = pywrap_tensorflow.TFE_Py_TensorShapeSlice([], 0) self.assertTrue(isinstance(a, ops.EagerTensor)) self.assertEqual(0, a.numpy().size) def testTensorListContainsNonTensors(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 1 has type \"str\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, "abc"], 0) with self.assertRaisesRegexp( TypeError, r"Expected a list of EagerTensors but element 0 has type \"int\""): pywrap_tensorflow.TFE_Py_TensorShapeSlice([2, t1], 0) def testTensorListNotList(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( TypeError, r"tensors argument must be a list or a tuple. Got.*EagerTensor"): pywrap_tensorflow.TFE_Py_TensorShapeSlice(t1, -2) def testNegativeSliceDim(self): t1 = _create_tensor([1, 2], dtype=dtypes.int32) with self.assertRaisesRegexp( ValueError, r"Slice dimension must be non-negative. Got -2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], -2) def testUnicode(self): self.assertEqual(constant_op.constant(u"asdf").numpy(), b"asdf") def testFloatTensor(self): self.assertEqual(dtypes.float64, _create_tensor(np.float64()).dtype) self.assertEqual(dtypes.float32, _create_tensor(np.float32()).dtype) self.assertEqual(dtypes.float16, _create_tensor(np.float16()).dtype) self.assertEqual(dtypes.float32, _create_tensor(0.0).dtype) def testSliceDimOutOfRange(self): t1 = _create_tensor([[1, 2], [3, 4], [5, 6]], dtype=dtypes.int32) t2 = _create_tensor([1, 2], dtype=dtypes.int32) t3 = _create_tensor(2, dtype=dtypes.int32) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(2\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 2"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1], 2) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(1\) must be smaller than rank of all tensors, " "but tensor at index 1 has rank 1"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t1, t2], 1) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 0 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t3], 0) with self.assertRaisesRegexp( IndexError, r"Slice dimension \(0\) must be smaller than rank of all tensors, " "but tensor at index 2 has rank 0"): pywrap_tensorflow.TFE_Py_TensorShapeSlice([t2, t1, t3], 0) @test_util.assert_no_new_pyobjects_executing_eagerly def testTensorDir(self): t = array_ops.zeros(1) t.test_attr = "Test" instance_dir = dir(t) 
type_dir = dir(ops.EagerTensor) # Monkey patched attributes should show up in dir(t) self.assertIn("test_attr", instance_dir) instance_dir.remove("test_attr") self.assertEqual(instance_dir, type_dir) def testNonRectangularPackAsConstant(self): l = [array_ops.zeros((10, 1)).numpy(), array_ops.zeros(1).numpy()] with self.assertRaisesRegexp( ValueError, "non-rectangular Python sequence"): constant_op.constant(l) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/tensor_test.py
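The tensor_test.py row above pins down a number of EagerTensor conversion behaviors; the sketch below (an editorial addition, assuming TF r1.15 with eager execution switched on) shows a few of them outside the test harness.

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

ops.enable_eager_execution()  # TF 1.x defaults to graph mode

t = constant_op.constant(np.eye(2))
print(t.dtype)  # <dtype: 'float64'> -- the numpy dtype survives conversion
print(t.shape)  # (2, 2)
print(len(t))   # 2 -- len() agrees with numpy for non-scalar tensors
print(bool(constant_op.constant([1.])))  # True -- nonzero one-element tensor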
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors

# Trace of execution and memory usage.
_active_trace = None


def _status_to_exception(code, message):
  try:
    error_class = errors.exception_type_from_error_code(code)
    return error_class(None, None, message)
  except KeyError:
    return errors.UnknownError(None, None, message, code)


class _NotOkStatusException(Exception):
  """Exception class to handle a non-OK Status."""

  def __init__(self, message, code):
    super(_NotOkStatusException, self).__init__()
    self.message = message
    self.code = code

  def __str__(self):
    e = _status_to_exception(self.code, self.message)
    return "%s: %s" % (e.__class__.__name__, e)


pywrap_tensorflow.TFE_Py_RegisterExceptionClass(_NotOkStatusException)


class _FallbackException(Exception):
  """Exception class to handle fallback from the fastpath.

  The fastpath that we refer to here is the one implemented to reduce per-op
  overheads (TFE_Py_FastPathExecute_C). If the conditions for executing the
  op on the fastpath are not met, we fall back to a safer (and more complete)
  slowpath, and this Exception is raised to signal that transition.
  """
  pass


class _SymbolicException(Exception):
  """Exception class to handle use of symbolic tensors when executing eagerly.

  `keras.Input()` creates symbolic tensors (in a FuncGraph managed by the
  Keras backend) while in eager execution. This exception is used to identify
  this case (raised in `convert_to_tensor` to cause generated functions for
  ops to construct graphs instead of executing the kernel).
  """
  pass


pywrap_tensorflow.TFE_Py_RegisterFallbackExceptionClass(_FallbackException)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/core.py
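A sketch of how the status-to-exception mapping in the core.py row above behaves for a mapped and an unmapped error code; this is an editorial addition that pokes at a protected member for illustration only.

from tensorflow.python.eager import core
from tensorflow.python.framework import errors

# A known code maps to its dedicated exception class...
e = core._status_to_exception(errors.INVALID_ARGUMENT, "bad arg")
print(type(e).__name__)  # InvalidArgumentError
# ...and anything unrecognized falls back to UnknownError.
e = core._status_to_exception(12345, "mystery")
print(type(e).__name__)  # UnknownError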
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from absl.testing import parameterized import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.layers.pooling import max_pooling3d from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.training import training class BackpropTest(test.TestCase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testAggregateGradients(self): def fn(x): ind1 = constant_op.constant(np.array([0, 1])) ind2 = constant_op.constant(np.array([2, 3])) ind3 = constant_op.constant(np.array([1, 3])) g1 = embedding_ops.embedding_lookup(x, ind1) g2 = embedding_ops.embedding_lookup(x, ind2) g3 = embedding_ops.embedding_lookup(x, ind3) return g1 * g2 * g3 var_np = np.random.rand(4, 2).astype(np.float32) var = constant_op.constant(var_np) grad = backprop.gradients_function(fn, [0])(var)[0] grad = self.evaluate(ops.convert_to_tensor(grad)) if not context.executing_eagerly(): tf_var = array_ops.constant(var_np, dtypes.float32) tf_ind1 = array_ops.constant([0, 1]) tf_ind2 = array_ops.constant([2, 3]) tf_ind3 = array_ops.constant([1, 3]) tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1) tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2) tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3) tf_y = tf_g1 * tf_g2 * tf_g3 tf_grad = gradients.gradients(tf_y, [tf_var])[0] tf_dense_grad = math_ops.unsorted_segment_sum( tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0]) self.assertAllClose(grad, self.evaluate(tf_dense_grad)) @test_util.run_in_graph_and_eager_modes def testAggregateGradientsWithTensor(self): def fn(x): ind1 = constant_op.constant(np.array([0, 1])) # A mixture of IndexedSlices and dense tensor to aggregate. 
g1 = embedding_ops.embedding_lookup(x, ind1) g2 = math_ops.reduce_sum(x * constant_op.constant(2.0)) return g1 * g2 var_np = np.random.rand(4, 2).astype(np.float32) var = constant_op.constant(var_np) grad = backprop.gradients_function(fn, [0])(var)[0] grad = self.evaluate(ops.convert_to_tensor(grad)) if not context.executing_eagerly(): tf_var = array_ops.constant(var_np, dtypes.float32) tf_ind1 = array_ops.constant([0, 1]) tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1) tf_g2 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1)) tf_y = tf_g1 * tf_g2 tf_grad = gradients.gradients(tf_y, [tf_var])[0] self.assertAllClose(grad, tf_grad) def testImplicitGradWithResourceVariable(self): x = resource_variable_ops.ResourceVariable( initial_value=constant_op.constant(1.0), name='x') def fn(): b = constant_op.constant(2.0) c = math_ops.add(x.value(), b) return math_ops.add(c, constant_op.constant(3.0)) grads_and_vars = backprop.implicit_grad(fn)() self.assertAllEqual(grads_and_vars[0][0], 1.0) self.assertAllEqual(id(grads_and_vars[0][1]), id(x)) @parameterized.named_parameters( [('Function', def_function.function), ('NoFunction', lambda f: f)]) def testIdentityBehaviorConsistent(self, decorator): @decorator def f(x): x1 = array_ops.identity(x) with backprop.GradientTape() as t: t.watch(x) t.watch(x1) y1 = x * 2. y2 = x1 * 3. loss = y1 + y2 return t.gradient(loss, [x, x1]) self.assertAllClose([2., 3.], f(constant_op.constant(10.))) def testGradientInsideLoop(self): with ops.Graph().as_default(): v = resource_variable_ops.ResourceVariable(1.0) def body(_): _ = v + 1.0 # This reads the variable inside the loop context with backprop.GradientTape() as t: result = v * 2 self.assertIsNotNone(t.gradient(result, v)) return 1.0 control_flow_ops.while_loop(lambda i: False, body, [1.0]) def testWhereGradient(self): # Note: where is special because only some of its arguments are of # differentiable dtypes. def f(x): return array_ops.where(x < 10, x, x * x) g = backprop.gradients_function(f) self.assertAllEqual(g(5.)[0], 1.0) self.assertAllEqual(g(50.)[0], 100.0) def testTwoTargets(self): with backprop.GradientTape() as t: x = constant_op.constant(3.0) y = constant_op.constant(2.0) t.watch([x, y]) xx = 2 * x yy = 3 * y dx, dy = t.gradient([xx, yy], [x, y]) self.assertAllEqual(dx, 2.0) self.assertAllEqual(dy, 3.0) def testCustomGradientEmptyError(self): @custom_gradient.custom_gradient def identity(x): def grad(_): return [] # This return value is wrong! 
return x, grad x = variables.Variable(1.0) with backprop.GradientTape() as t: y = identity(x) with self.assertRaises(ValueError): t.gradient(y, [x]) def testOutputGradUsedInComputation(self): with backprop.GradientTape() as t: x = constant_op.constant(3.0) y = constant_op.constant(2.0) t.watch([x, y]) loss = x * y dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0]) self.assertAllEqual(dx, 4.0) def testDy(self): def f(x): return x grad_fn = backprop.gradients_function(f) self.assertAllEqual(2., grad_fn(1., dy=2.)[0]) def testGradientInteger(self): def f(x): return x + x int_tensor = constant_op.constant(1) self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None) def testErrors(self): @custom_gradient.custom_gradient def f(x): def grad(_): raise RuntimeError('x') return x, grad # TODO(apassos) raise the right error here with self.assertRaises(RuntimeError): backprop.gradients_function(f)(constant_op.constant(1.0)) def testGradientsFunctionInCustomGradient(self): @custom_gradient.custom_gradient def f(x): (y,) = backprop.gradients_function(lambda x: x * x)(x) def grad(dy): return [2 * dy] return y, grad self.assertAllEqual(f(1.0), 2.0) def testImplicitGradOverEmbeddingLookup(self): batch_size = 8 embedding_size = 512 vocab_size = 1000 lrn_rate = 0.1 random_init = random_ops.random_uniform([vocab_size, embedding_size]) x = array_ops.ones((batch_size), dtypes.int64) embedding = resource_variable_ops.ResourceVariable( initial_value=random_init, dtype=dtypes.float32, name='embedding') def f(): embedded_x = embedding_ops.embedding_lookup(embedding, x) return constant_op.constant(1.0, dtypes.float32) - embedded_x grad = backprop.implicit_grad(f)()[0][0] opt = training.GradientDescentOptimizer(lrn_rate) with ops.Graph().as_default(), self.cached_session(): tf_x = array_ops.ones((batch_size), dtypes.int64) # TODO(ashankar,apassos): Change to ResourceVariable. 
tf_embedding = variables.Variable( random_init.numpy(), name='tf_embedding') tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x) tf_y = 1.0 - tf_embedded_x tf_grad = gradients.gradients(tf_y, [tf_embedding])[0] tf_opt = training.GradientDescentOptimizer(0.1) tf_embedding.initializer.run() self.assertAllClose(tf_grad.indices.eval(), grad.indices) self.assertAllClose(tf_grad.values.eval(), grad.values) tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run() expected = self.evaluate(tf_embedding) opt.apply_gradients([(grad, embedding)]) self.assertAllClose(expected, embedding.read_value()) def testImplicitGradOrdering(self): v0 = resource_variable_ops.ResourceVariable(1.0) v1 = resource_variable_ops.ResourceVariable(2.0) def f(): x = v1 * v1 y = v0 * v0 return x + y grads = backprop.implicit_grad(f)() ordered_variables = [x[1] for x in grads] self.assertIs(ordered_variables[0], v0) self.assertIs(ordered_variables[1], v1) def testTapeNoOpGradient(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = x self.assertEqual(t.gradient(y, x).numpy(), 1.0) def testTapeIdentityGradientIsIdentity(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = array_ops.identity(x) self.assertEqual(t.gradient(y, x).numpy(), 1.0) def testTapeGradientMultiTargetOneIsSource(self): x = constant_op.constant(2.0) with backprop.GradientTape() as t: t.watch(x) y = x*x self.assertEqual(t.gradient([x, y], x).numpy(), 5.0) def testTapeNoOpGradientWithMultiTargetAllSource(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = x self.assertEqual(t.gradient([y, y], x).numpy(), 2.0) def testTapeNoOpGradientWithMultiTargetMultiSource(self): x = constant_op.constant(3.0) y = constant_op.constant(5.0) with backprop.GradientTape() as t: t.watch(x) t.watch(y) z = y * y self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0]) def testTapeGradientStringTarget(self): s = constant_op.constant('unknown', dtype=dtypes.string) x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) t.watch(s) grads = t.gradient(s, x) self.assertEqual(grads, None) def testTapeNoOpGradientStringSourceAndTarget(self): s = constant_op.constant('unknown', dtype=dtypes.string) with backprop.GradientTape() as t: t.watch(s) grads = t.gradient(s, s) self.assertEqual(grads, None) def testTapeNoOpGradientWithMultiTargetMultiSourceIncludeString(self): x = constant_op.constant(3.0) y = constant_op.constant(5.0) s = constant_op.constant('unknown', dtype=dtypes.string) with backprop.GradientTape() as t: t.watch(x) t.watch(y) t.watch(s) z = y * y grads = t.gradient([x, y, z, s], [x, y, s]) self.assertAllEqual(grads[:2], [1.0, 11.0]) self.assertEqual(grads[2], None) def testTapeNoOpOnVariableIsIdentity(self): v0 = resource_variable_ops.ResourceVariable(1.0) with backprop.GradientTape() as t: y = v0.read_value() self.assertEqual(t.gradient(y, v0).numpy(), 1.0) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testTapeNoOpGradient2By2(self): a_2_by_2 = constant_op.constant(2.0, shape=[2, 2]) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0] self.assertAllEqual(dy_dy.numpy(), constant_op.constant(1.0, shape=[2, 2]).numpy()) @test_util.assert_no_new_pyobjects_executing_eagerly def testTapeNoOpGradientMultiTarget2By2(self): a_2_by_2 = constant_op.constant(2.0, shape=[2, 2]) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) 
dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0] self.assertAllEqual(dy_dy.numpy(), constant_op.constant(2.0, shape=[2, 2]).numpy()) def testTapeStopRecording(self): with backprop.GradientTape() as t: x = resource_variable_ops.ResourceVariable(1.0) with t.stop_recording(): y = x * x self.assertEqual(t.gradient(y, x), None) def testTapeStopStartRecording(self): with backprop.GradientTape(persistent=True) as t: x = resource_variable_ops.ResourceVariable(1.0) x2 = x * 2 # This should be differentiated through. with t.stop_recording(): y = x2 * x2 z = x2 * x2 self.assertEqual(t.gradient(y, x2), None) # If the x*2 was not differentiated through, this would be 2.0, not 4.0 self.assertEqual(t.gradient(z, x2).numpy(), 4.0) def testTapeReset(self): with backprop.GradientTape() as t: v = resource_variable_ops.ResourceVariable(1.0) loss = v * v t.reset() loss += v * v self.assertAllEqual(t.gradient(loss, v), 2.0) def testPythonMax(self): x = [resource_variable_ops.ResourceVariable(2.), resource_variable_ops.ResourceVariable(3.), resource_variable_ops.ResourceVariable(5.)] with backprop.GradientTape() as t: f = max(x) grad = t.gradient(f, x) self.assertAllEqual(self.evaluate(f), 5.) self.assertAllEqual(self.evaluate(grad), [None, None, 1.0]) def testAutomaticWatchedVariables(self): with backprop.GradientTape() as t: self.assertEqual(0, len(t.watched_variables())) v = resource_variable_ops.ResourceVariable(1.0) loss = v * v self.assertAllEqual([v], t.watched_variables()) t.reset() self.assertEqual(0, len(t.watched_variables())) loss += v * v self.assertAllEqual([v], t.watched_variables()) def testExplicitWatchedVariables(self): with backprop.GradientTape() as t: self.assertEqual(0, len(t.watched_variables())) v = resource_variable_ops.ResourceVariable(1.0) t.watch(v) self.assertAllEqual([v], t.watched_variables()) t.reset() self.assertEqual(0, len(t.watched_variables())) t.watch(v) self.assertAllEqual([v], t.watched_variables()) @test_util.assert_no_new_tensors def testGradientNone(self): def loss(x, l): return math_ops.reduce_mean( nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l), constant_op.constant([0])) logits = constant_op.constant([[0.0, 0.0]]) labels = constant_op.constant([[1.0, 0.0]]) # softmax_cross_entropy_with_logits returns two outputs and in this case the # gradient wrt the second is None. g, = backprop.gradients_function(loss, [0])(logits, labels) self.assertAllEqual(g.numpy(), [[-0.5, 0.5]]) @test_util.run_in_graph_and_eager_modes def testGradientWithinTapeBlock(self): v1 = resource_variable_ops.ResourceVariable(1.) self.evaluate(v1.initializer) with backprop.GradientTape() as t: loss = 2 * v1 grad = t.gradient(loss, v1) self.assertAllEqual(self.evaluate(grad), 2.0) with backprop.GradientTape(persistent=True) as t: loss = 2 * v1 grad = t.gradient(loss, v1) self.assertAllEqual(self.evaluate(grad), 2.0) @test_util.run_in_graph_and_eager_modes def testNestedSelfContexts(self): v1 = resource_variable_ops.ResourceVariable(1.) 
self.evaluate(v1.initializer) with backprop.GradientTape() as t: with self.assertRaises(ValueError): with t: pass @test_util.assert_no_new_tensors def testSecondGrad(self): def first(x): l = constant_op.constant([[0.0]]) x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x) x = math_ops.reduce_sum(x, constant_op.constant([0])) return x def second(x): grad = backprop.gradients_function(first, [0])(x)[0] return math_ops.reduce_sum(grad, constant_op.constant([0])) f = constant_op.constant([[0.1]]) grad = backprop.gradients_function(second, [0])(f)[0] self.assertAllEqual([[0.0]], grad) @test_util.run_in_graph_and_eager_modes def testWatchingIsTapeLocal(self): x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False) x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False) with backprop.GradientTape() as tape1: with backprop.GradientTape() as tape2: tape1.watch(x1) tape2.watch([x1, x2]) y = x1 ** 3 z = x2 ** 2 dy, dz = tape2.gradient([y, z], [x1, x2]) d2y, d2z = tape1.gradient([dy, dz], [x1, x2]) self.evaluate([x1.initializer, x2.initializer]) self.assertEqual(self.evaluate(d2y), 12.0) self.assertIsNone(d2z) @test_util.assert_no_new_tensors def testMakeVJP(self): def f(x): return x * x wrapped_fn = backprop.make_vjp(f, persistent=False) result, vjp = wrapped_fn(constant_op.constant(3.0)) self.assertAllEqual(result, 9.0) self.assertAllEqual(vjp(2.0)[0], 12.0) def testPersistentMakeVJP(self): def f(x): return x * x wrapped_fn = backprop.make_vjp(f, persistent=True) _, vjp = wrapped_fn(constant_op.constant(3.0)) vjp_result1 = vjp(2.0)[0] vjp_result2 = vjp(2.0)[0] self.assertAllEqual(vjp_result1, vjp_result2, 12.0) @test_util.assert_no_new_tensors def testGradGrad(self): def sq(x): return x * x def grad(x): value = backprop.gradients_function(sq, [0])(x)[0] return value gradgrad = backprop.gradients_function(grad, [0]) self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0) @test_util.assert_no_new_tensors def testGradGradExp(self): def grad(x): value = backprop.gradients_function(math_ops.exp, [0])(x)[0] return value gradgrad = backprop.gradients_function(grad, [0]) self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0) @test_util.assert_no_new_tensors def testStopGradient(self): grad = backprop.gradients_function( lambda x: array_ops.stop_gradient(math_ops.argmax(x))) self.assertAllEqual(grad([0.0])[0], None) @test_util.assert_no_new_tensors def testArgmax(self): def argmax(x): i = math_ops.argmax(x) return array_ops.stop_gradient(i) grad = backprop.gradients_function(argmax) self.assertAllEqual(grad([0.0])[0], None) @test_util.run_gpu_only @test_util.assert_no_new_tensors def testGPU(self): def fn(x): with context.device('/gpu:0'): b = constant_op.constant(2.0) c = math_ops.add(x.gpu(), b) # TODO(apassos): remove cpu below by making TensorVSPace aware # of devices. 
return math_ops.add(c, constant_op.constant(3.0)).cpu() grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0] self.assertAllEqual(grad, 1.0) @test_util.run_gpu_only @test_util.assert_no_new_tensors def testGPUImplicitGrad(self): with context.device('gpu:0'): v = resource_variable_ops.ResourceVariable( constant_op.constant(1.0), name='v') def f(): with context.device('gpu:0'): return v.read_value() self.assertEqual( backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0) @test_util.assert_no_new_tensors def testCPU(self): def fn(x): b = constant_op.constant(2.0) c = math_ops.add(x, b) return math_ops.add(c, constant_op.constant(3.0)) grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0] self.assertAllEqual(grad, 1.0) @test_util.run_gpu_only @test_util.assert_no_new_tensors def testTensorCopyGPU2CPU2GPU(self): def f(a, b): return a.cpu() + b.cpu() with context.device('/gpu:0'): a = constant_op.constant(1.0) b = constant_op.constant(2.0) grad = backprop.gradients_function(f, [0])(a, b)[0] self.assertAllEqual(grad, 1.0) @test_util.assert_no_new_tensors def testEmptyParams(self): def fn(a, b): return a * b x = constant_op.constant(1.0) y = constant_op.constant(2.0) dx, dy = backprop.gradients_function(fn)(x, y) self.assertAllEqual(dx, y.numpy()) self.assertAllEqual(dy, x.numpy()) @test_util.assert_no_new_tensors def testUnconnectedNone(self): v = resource_variable_ops.ResourceVariable( 1.0, name='testUnconnectedNone') def f(): v.read_value() return constant_op.constant(1.0) self.assertEqual(backprop.implicit_grad(f)()[0][0], None) @test_util.assert_no_new_tensors def testGradientTapeReEnterContext(self): g = backprop.GradientTape() with g: x = constant_op.constant(3.0) g.watch(x) y = 2*x with g: z = 2*y grad = g.gradient(target=z, sources=[x]) self.assertEqual(self.evaluate(grad), [4.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeRepeatedSource(self): with backprop.GradientTape(persistent=False) as g: x = constant_op.constant(3.0) g.watch(x) y = 2 * x grad = g.gradient(target=y, sources=[x, x]) self.assertEqual(self.evaluate(grad), [2.0, 2.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testPersistentGradientTapeRepeatedSource(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) y = constant_op.constant(5.0) g.watch(x) g.watch(y) z = x * x + x * y grad = g.gradient(target=z, sources=[x, x]) self.assertEqual(self.evaluate(grad), [11.0, 11.0]) grad = g.gradient(target=z, sources=[y, x]) self.assertEqual(self.evaluate(grad), [3.0, 11.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeStructure(self): with backprop.GradientTape(persistent=True) as g: # Using different constant values because constant tensors are # cached, leading to a different gradient then what one might expect. 
x1 = constant_op.constant(3.0) x2 = constant_op.constant(3.1) x3 = constant_op.constant(3.2) g.watch(x1) g.watch(x2) g.watch(x3) y = x1 + 2 * x2 + 3 * x3 self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0]) self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,)) self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0)) self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])), [(1.0, 2.0), (2.0, 3.0)]) self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))), (1.0, 2.0, [1.0, 3.0])) self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])), [1.0, {'x2': 2.0, 'x3': 3.0}]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTape(self): with backprop.GradientTape() as g: x = constant_op.constant(3.0) g.watch(x) y = x * x with backprop.GradientTape() as gg: gg.watch(y) z = 2 * y inner_grad = gg.gradient(z, [y])[0] self.assertEqual(self.evaluate(inner_grad), 2.0) y += inner_grad grad = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(grad), 6.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGadientTapeCalledOnConstantTarget(self): with backprop.GradientTape() as g: x = variables.Variable([3.0]) y = variables.Variable([2.0]) grad = g.gradient(x, y) self.assertAllEqual(grad, None) @test_util.run_in_graph_and_eager_modes @test_util.run_v1_only('b/120545219') def testGradientTapeWithCond(self): x = constant_op.constant(3.0) def true_fn(): return x def false_fn(): return x * x with backprop.GradientTape() as g: g.watch(x) y = control_flow_ops.cond(x < x, true_fn, false_fn) if not context.executing_eagerly(): with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'): dy = g.gradient(y, [x])[0] else: dy = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy), 6.0) @test_util.run_in_graph_and_eager_modes @test_util.run_v1_only('b/120545219') def testGradientTapeWithWhileLoop(self): i = constant_op.constant(1) x = constant_op.constant(2.) 
def cond(i, _): return i < 3 def body(i, x): return i + 1, x * 2 with backprop.GradientTape() as g: g.watch([x]) _, y = control_flow_ops.while_loop(cond, body, [i, x]) if not context.executing_eagerly(): with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'): dy = g.gradient(y, [x])[0] else: dy = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy), 4.0) @test_util.assert_no_new_tensors def testGradientTapeGradientCalledMultipleTimes(self): with backprop.GradientTape() as g: x = constant_op.constant(3.0) g.watch(x) y = x * x z = y * y g.gradient(z, [x]) with self.assertRaisesRegexp( RuntimeError, 'GradientTape.gradient can only be called once'): g.gradient(y, [x]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes @test_util.run_v1_only('b/120545219') def testPersistentTape(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x * x z = y * y dz_dx = g.gradient(z, [x])[0] self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3) dy_dx = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy_dx), 2 * 3) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testHigherOrderGradient(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x ** 3 # y := x^3 dy_dx = g.gradient(y, x) # dy/dx := 3x^2 d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6 x = 3 self.assertEqual(self.evaluate(y), x ** 3) self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2) self.assertEqual(self.evaluate(d2y_dx2), 6 * x) self.assertEqual(self.evaluate(d3y_dx3), 6) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testPersistentNestedTape(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x * x with backprop.GradientTape(persistent=True) as gg: gg.watch(y) z = 2 * y for _ in range(2): inner_grad = gg.gradient(z, [y])[0] self.assertEqual(self.evaluate(inner_grad), 2.0) y += inner_grad del gg grad = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(grad), 6.0) grad = g.gradient(z, [x])[0] self.assertEqual(self.evaluate(grad), 12.0) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeVariable(self): v = resource_variable_ops.ResourceVariable(1.0, name='v') self.evaluate(v.initializer) with backprop.GradientTape() as g: y = v * v grad = g.gradient(y, [v])[0] self.assertAllEqual(self.evaluate(grad), 2.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testNestedGradients(self): x = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch(x) y = x * x z = y * y dz_dx, dz_dy = g.gradient(z, [x, y]) self.assertEqual(self.evaluate(dz_dx), 108.0) self.assertEqual(self.evaluate(dz_dy), 18.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsDefault(self): x = constant_op.constant(1.0) y = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 dz_dx = g.gradient(z, x) self.assertEqual(dz_dx, None) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsZeros(self): x = constant_op.constant(1.0, shape=[2, 2]) y = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 dz_dx = g.gradient(z, x, unconnected_gradients='zero') self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx)) 
@test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsVariablesZeros(self): x = resource_variable_ops.ResourceVariable( constant_op.constant(1., shape=[2, 2])) self.evaluate(x.initializer) y = resource_variable_ops.ResourceVariable(constant_op.constant(3.)) self.evaluate(y.initializer) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 dz_dx = g.gradient(z, x, unconnected_gradients='zero') self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx)) @test_util.run_in_graph_and_eager_modes def testUnknownUnconnectedGradientsValueGiven(self): x = constant_op.constant(1.0) y = constant_op.constant(1.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 with self.assertRaisesRegexp( ValueError, "Unknown value for unconnected_gradients: 'nonsense'"): g.gradient(z, x, unconnected_gradients='nonsense') @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsNestedDefunZeros(self): @function.defun def f(x): return x * x @function.defun def h(y): z = f(y) return array_ops.stop_gradient(z) x = constant_op.constant(1.0) with backprop.GradientTape() as g: g.watch(x) y = h(x) dy_dx = g.gradient(y, x, unconnected_gradients='zero') self.assertEqual(0.0, self.evaluate(dy_dx)) @test_util.assert_no_new_tensors def testEmptyParamsForValueAndGradFunction(self): def fn(a, b): return a * b val_and_grads_fn = backprop.val_and_grad_function(fn) x = 2.0 y = 3.0 val, (dx, dy) = val_and_grads_fn(x, y) self.assertAllClose(val, x * y) self.assertAllEqual(dx, y) self.assertAllEqual(dy, x) @test_util.assert_no_new_tensors def testNonEmptyParamsForValueAndGradFunction(self): def fn(a, b): return a * b val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1]) x = 2.0 y = 3.0 val, grads = val_and_grad_fn(x, y) self.assertAllClose(val, x * y) self.assertEqual(1, len(grads)) self.assertAllEqual(grads[0], x) @test_util.run_gpu_only @test_util.assert_no_new_tensors def testTensorCopyCPU2GPU2CPU(self): # forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu) # back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu) def f(a, b): with context.device('/gpu:0'): c = math_ops.add(a.gpu(0), b.gpu(0)) return math_ops.add(c.cpu(), constant_op.constant(3.0)) with context.device('/cpu:0'): a = constant_op.constant(1.0) b = constant_op.constant(2.0) grad = backprop.gradients_function(f, [0])(a, b)[0] self.assertAllEqual(grad, 1.0) def testGetAttrType(self): typ = backprop.op_attr_type('Add', 'T') self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE) def testGetAttrList(self): typ = backprop.op_attr_type('MaxPool', 'ksize') self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT]) def testMakeAttrType(self): self.assertEqual(dtypes.float32, backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1)) def testMakeAttrTypeList(self): self.assertEqual([dtypes.float32], backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1])) def testMulType(self): def mul(x): return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access self.assertAllEqual( backprop.gradients_function(mul)(3.0)[0].numpy(), 6.0) def testMakeAttrShape(self): for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]): expected = tensor_shape.TensorShape(s).as_proto() actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s) self.assertEqual( expected, actual, msg=('For shape %r, expected %r != %r actual' % (s, expected, actual))) def testMakeAttrShapeList(self): shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]] self.assertEqual( 
[tensor_shape.TensorShape(s).as_proto() for s in shape_list], backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list)) def testArgsGradientFunction(self): def f(*args): return args[0] * args[0] grad = backprop.gradients_function(f) self.assertAllEqual(grad(1.0)[0], 2.0) def testPartial(self): def f(x, y): return x * y part = functools.partial(f, constant_op.constant(2.0)) self.assertAllEqual( backprop.gradients_function(part)(constant_op.constant(1.0))[0], 2.0) def testReturnSameThing(self): def f(x): return x, 2 * x self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0) @test_util.assert_no_new_tensors def testExceptionSafety(self): def f(unused_x): raise ValueError() try: backprop.gradients_function(f)(1.0) except ValueError: pass def real_f(x): return x * x self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0) @test_util.assert_no_new_tensors def testMultiValueConvertToTensor(self): x = resource_variable_ops.ResourceVariable( initial_value=array_ops.constant([1.0]), name='x') def fn(): a = math_ops.add(x.value(), 1.0) # Make sure convert_to_tensor works correctly with list of TensorNodes. b = array_ops.stack([a, a], axis=0) return math_ops.reduce_mean(b) grad = backprop.implicit_grad(fn)()[0][0] self.assertAllEqual([1.0], grad) def testOutput(self): def multiout(x): return x + 2, x * x x = constant_op.constant([0.0, 1.0, 2.0]) grad = backprop.gradients_function(multiout)(x)[0] self.assertAllEqual([1.0, 3.0, 5.0], grad) def testMultiValuePreservesIfNotDiffedAgainst(self): def tfe_conv2d(timage, tkernel, conv2dstrides): return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME') i = constant_op.constant([[[[1.0]]]]) k = constant_op.constant([[[[2.0]]]]) s = [1, 1, 1, 1] grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0] self.assertAllEqual([[[[2.0]]]], grad) def testSameObjectForMultipleArguments(self): def f(x, y): return math_ops.multiply(x, y) g = backprop.gradients_function(f) def np_g(x, y): dx, dy = g(x, y) return [dx.numpy(), dy.numpy()] x = constant_op.constant(1.) self.assertAllEqual([1., 1.], np_g(x, x)) x = 1. self.assertAllEqual([1., 1.], np_g(x, x)) x = constant_op.constant([[1.]]) self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x)) x = [[1.]] self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x)) v = resource_variable_ops.ResourceVariable( initial_value=1., name='testSameObjectForMultipleArguments.Variable') self.assertAllEqual([1., 1.], np_g(v, v)) @test_util.assert_no_new_tensors def testImplicitGradientsCustomGradientAndCachedVariableValue(self): @custom_gradient.custom_gradient def my_square(x): result = math_ops.square(x) def grad(dr): return 2 * dr * x + 1 return result, grad x = resource_variable_ops.ResourceVariable( initial_value=3., name='X.' + self.id()) def f(): return my_square(x) g = backprop.implicit_grad(f) grads_and_vars = g() self.assertEqual(1, len(grads_and_vars)) grad, var = grads_and_vars[0] self.assertAllEqual(7, grad) self.assertAllEqual(x, var) def testJacobianCustomGradient(self): class MyCallable(object): def __init__(self): self.a = variables.Variable(1.) self.b = variables.Variable(2.) self.c = variables.Variable(3.) 
def __call__(self, x): return self.a * x * x + self.b * x + self.c @def_function.function def call(c, x): @custom_gradient.custom_gradient def _call(): y = c(x) def grad(dy, variables=None): # pylint: disable=redefined-outer-name with backprop.GradientTape(persistent=True) as g: g.watch(variables) y = c(x) grad_vars = [ 2 * math_ops.reduce_sum(dy * g.jacobian(y, v)) for v in variables ] del g return (), grad_vars return y, grad return _call() c = MyCallable() x = constant_op.constant([1., 2., 3.]) with backprop.GradientTape(persistent=True) as g: g.watch([c.a, c.b, c.c]) y = call(c, x) self.assertAllEqual(g.gradient(y, x), None) @test_util.assert_no_new_tensors def testCustomGradient(self): @custom_gradient.custom_gradient def my_mul(x, y): result = x*y def grad(dr): return [dr*y, dr*x] return result, grad lr = 0.25 x = resource_variable_ops.ResourceVariable(2., name='x') def loss(x): return my_mul(2., x.read_value()) loss_grads_fn = backprop.implicit_val_and_grad(loss) losses = [] for _ in range(5): loss, grads_and_vars = loss_grads_fn(x) losses.append(loss.numpy()) for (grad, var) in grads_and_vars: var.assign_sub(lr*grad) self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.]) @test_util.assert_no_new_tensors def testCustomGradientIdentity(self): @custom_gradient.custom_gradient def my_identity(x): def grad(dresult): return [2 * dresult] return x, grad self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0) def testDifferentiatingFunctionThatReturnsNone(self): def fn(x, y): result = x*y # pylint: disable=unused-variable x = constant_op.constant(1) y = constant_op.constant(2) loss_grads_fn = backprop.implicit_val_and_grad(fn) with self.assertRaisesRegexp( ValueError, 'Cannot differentiate a function that returns None; ' 'did you forget to return a value from fn?'): loss_grads_fn(x, y) val_and_grads_fn = backprop.val_and_grad_function(fn) with self.assertRaisesRegexp( ValueError, 'Cannot differentiate a function that returns None; ' 'did you forget to return a value from fn?'): val_and_grads_fn(x, y) def testZerosCacheDoesntLeakAcrossGraphs(self): with ops.Graph().as_default(): def get_grad(): with ops.Graph().as_default(), self.cached_session(): t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4)) x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4)) with backprop.GradientTape() as tape: tape.watch(x) x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1) y1 = x1**2 y = array_ops.concat([y1, t], axis=1) return self.evaluate(tape.gradient(y, x)) grad1 = get_grad() grad2 = get_grad() self.assertAllEqual(grad1, grad2) @test_util.run_in_graph_and_eager_modes def testSelectivelyWatchVariables(self): x1 = resource_variable_ops.ResourceVariable(1.0) x2 = resource_variable_ops.ResourceVariable(1.0) with backprop.GradientTape(watch_accessed_variables=False) as tape: tape.watch(x2) y = x1**2 z = x2**3 self.assertTupleEqual(tape.watched_variables(), (x2,)) dy, dz = tape.gradient([y, z], [x1, x2]) self.evaluate([x1.initializer, x2.initializer]) self.assertIsNone(dy) self.assertEqual(self.evaluate(dz), 3.0) @test_util.run_in_graph_and_eager_modes def testDifferentiatingScalarCache(self): # In the following test, if x2 = x1 (i.e the objects are the exact same), # then y is essentially, 2*x1, and dy/dx1 = 2. # When we had a pure scalar cache in eager, this would be the case. This # test prevents us from going back to that case. 
with backprop.GradientTape(persistent=False) as g: x1 = constant_op.constant(3.0) x2 = constant_op.constant(3.0) g.watch(x1) g.watch(x2) y = x1 + x2 grad = g.gradient(target=y, sources=[x1]) self.assertEqual(self.evaluate(grad), [1.0]) def testVariablesAndConstantsProduceTheSameGradients(self): # In the following test, differentiating [y, z] against [a, b] gives: # (dy/da + dz/da, dy/db + dz/db). # If a and b are the same constant, dz/da will not be 0 (which it should # be). # This is solved by using variable since doing a read_value on a tensor will # produce a new tensor and corresponding TensorHandle, and not reuse the # same tensor (which would happen if we are using a cache and reusing # EagerTensor objects). def get_grads(a, b): with backprop.GradientTape() as tape: tape.watch([a, b]) y = a**3 z = b**2 return tape.gradient([y, z], [a, b]) gradients_constants = get_grads( constant_op.constant(2.0), constant_op.constant(2.0)) gradients_variables = get_grads( resource_variable_ops.ResourceVariable(2.0), resource_variable_ops.ResourceVariable(2.0)) self.assertAllEqual(gradients_constants, gradients_variables) def testUnknownShapes(self): with ops.Graph().as_default(): with backprop.GradientTape() as tape: a = array_ops.placeholder(dtype=dtypes.float32, shape=None) tape.watch(a) b = a**3 db_da = tape.gradient(b, a) with self.cached_session() as sess: self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0})) @test_util.run_in_graph_and_eager_modes def testCustomGradientInEagerAndGraph(self): @custom_gradient.custom_gradient def f(x): y = x * x def grad(dy): return [4 * dy] return y, grad with backprop.GradientTape() as t: c = constant_op.constant(1.0) t.watch(c) g = f(c) self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0) @test_util.run_in_graph_and_eager_modes def testMaxPooling3DGradient(self): if test.is_built_with_rocm(): self.skipTest('Pooling with 3D tensors is not supported in ROCm') def forward(a): r = max_pooling3d(a, pool_size=pool_size, strides=strides, padding='SAME') return r input_sizes = [1, 3, 2, 4, 1] pool_size = (2, 2, 1) strides = (1, 1, 1) total_size = np.prod(input_sizes) x = np.arange(1, total_size + 1, dtype=np.float32) aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32) da = backprop.gradients_function(forward)(aa) if not context.executing_eagerly(): tf_aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32) tf_max = max_pooling3d( tf_aa, pool_size=pool_size, strides=strides, padding='SAME') tf_da = gradients.gradients(tf_max, [tf_aa]) self.assertAllEqual(da[0], tf_da[0].eval()) @test_util.run_in_graph_and_eager_modes def testWatchBadThing(self): g = backprop.GradientTape() with self.assertRaisesRegexp(ValueError, 'ndarray'): g.watch(np.array(1.)) class JacobianTest(test.TestCase): def _jacobian(self, experimental_use_pfor): persistent = context.executing_eagerly and not experimental_use_pfor with backprop.GradientTape(persistent=persistent) as g: x = constant_op.constant([1., 2.]) y = constant_op.constant([3., 4.]) g.watch(x) g.watch(y) z = x * x * y jacobian = g.jacobian(z, [x, y], experimental_use_pfor=experimental_use_pfor) answer = [array_ops.diag(2 * x * y), array_ops.diag(x * x)] return jacobian, answer @test_util.run_v1_only('b/120545219') def testPfor(self): jacobian, answer = self._jacobian(experimental_use_pfor=True) for j, a in zip(jacobian, answer): self.assertAllEqual(a, j) @test_util.run_v1_only('b/120545219') def testWhileLoop(self): jacobian, answer = self._jacobian(experimental_use_pfor=False) for 
j, a in zip(jacobian, answer): self.assertAllEqual(a, j) @test_util.run_v1_only('b/120545219') def testPforDefun(self): @function.defun def _f(): return self._jacobian(experimental_use_pfor=True) jacobian, answer = _f() for j, a in zip(jacobian, answer): self.assertAllEqual(a, j) @test_util.run_v1_only('b/120545219') def testWhileLoopDefun(self): @function.defun def _f(): return self._jacobian(experimental_use_pfor=False) jacobian, answer = _f() for j, a in zip(jacobian, answer): self.assertAllEqual(a, j) @test_util.run_v1_only('b/120545219') def testPersistentTape(self): if not context.executing_eagerly(): return with backprop.GradientTape() as g: x = constant_op.constant([1.0, 2.0]) g.watch(x) y = x * x with self.assertRaisesRegexp(RuntimeError, 'persistent'): g.jacobian(y, x, experimental_use_pfor=False) @test_util.run_v1_only('b/120545219') def testPforException(self): var = variables.Variable([1.]) @custom_gradient.custom_gradient def op(x): def grad(_): # Note that we perform a stateful operation here that will not be # compatible with parallel for construct. with ops.control_dependencies( [var.assign(random_ops.random_uniform([1]))]): return constant_op.constant(1.) return x, grad with backprop.GradientTape() as g: x = constant_op.constant([1., 2.]) g.watch(x) y = op(x) with self.assertRaisesRegexp(ValueError, 'No converter'): g.jacobian(y, x, experimental_use_pfor=True) @test_util.run_v1_only('b/120545219') def test_parallel_iterations(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant([[1., 2], [3, 4]]) g.watch(x) y = math_ops.matmul(x, x) self.assertAllClose(g.jacobian(y, x, parallel_iterations=2), g.jacobian(y, x, parallel_iterations=3)) @test_util.run_in_graph_and_eager_modes def test_nested_jacobian(self): if context.executing_eagerly(): # TODO(agarwal): b/128842926 self.skipTest('Conversion of function calls not implemented yet.') x = array_ops.ones((10, 2)) with backprop.GradientTape(persistent=False) as g: g.watch(x) with backprop.GradientTape(persistent=False) as gg: gg.watch(x) y = math_ops.reduce_sum(math_ops.square(x)) dy_x = gg.jacobian(y, x) dy_xx = g.batch_jacobian(dy_x, x) dy_xx_answer = [[[2., 0], [0, 2.]]] * 10 self.assertAllClose(dy_xx_answer, self.evaluate(dy_xx)) @test_util.run_in_graph_and_eager_modes def test_indexed_slices(self): with backprop.GradientTape(persistent=True) as g: inp = random_ops.random_uniform([3, 2]) g.watch(inp) output = nn.embedding_lookup(inp, [0, 2]) self.assertAllClose( g.jacobian(output, inp, experimental_use_pfor=True), g.jacobian(output, inp, experimental_use_pfor=False)) @test_util.run_all_in_graph_and_eager_modes class BatchJacobianTest(test.TestCase, parameterized.TestCase): def _batch_jacobian(self, experimental_use_pfor): persistent = context.executing_eagerly and not experimental_use_pfor with backprop.GradientTape(persistent=persistent) as g: x = constant_op.constant([[1., 2.], [3., 4.]]) y = constant_op.constant([[3., 4.], [5., 6.]]) g.watch(x) z = x * x * y batch_jacobian = g.batch_jacobian( z, x, experimental_use_pfor=experimental_use_pfor) answer = array_ops.stack([array_ops.diag(2 * x[0] * y[0]), array_ops.diag(2 * x[1] * y[1])]) return batch_jacobian, answer def testPfor(self): batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=True) self.assertAllEqual(answer, batch_jacobian) def testWhileLoop(self): batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=False) self.assertAllEqual(answer, batch_jacobian) def testPforDefun(self): @function.defun def _f(): 
return self._batch_jacobian(experimental_use_pfor=True) batch_jacobian, answer = _f() self.assertAllEqual(answer, batch_jacobian) def testWhileLoopDefun(self): @function.defun def _f(): return self._batch_jacobian(experimental_use_pfor=False) batch_jacobian, answer = _f() self.assertAllEqual(answer, batch_jacobian) def testPersistentTape(self): if not context.executing_eagerly(): return with backprop.GradientTape() as g: x = constant_op.constant([[1.0, 2.0]]) g.watch(x) y = x * x with self.assertRaisesRegexp(RuntimeError, 'persistent'): g.batch_jacobian(y, x, experimental_use_pfor=False) def testBadShape(self): x = random_ops.random_uniform([2, 3]) with backprop.GradientTape() as g: y = array_ops.concat([x, x], axis=0) with self.assertRaisesRegexp(ValueError, 'Need first dimension'): g.batch_jacobian(y, x) def testBadInputRank(self): x = random_ops.random_uniform([2]) with backprop.GradientTape() as g: y = random_ops.random_uniform([2, 2]) with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'): g.batch_jacobian(y, x) def testBadOutputRank(self): x = random_ops.random_uniform([2, 2]) with backprop.GradientTape() as g: y = random_ops.random_uniform([2]) with self.assertRaisesRegexp(ValueError, 'must have rank at least 2'): g.batch_jacobian(y, x) def testPforException(self): var = variables.Variable([1.]) @custom_gradient.custom_gradient def op(x): def grad(_): # Note that we perform a stateful operation here that will not be # compatible with parallel for construct. with ops.control_dependencies( [var.assign(random_ops.random_uniform([1]))]): return constant_op.constant(1.) return x, grad with backprop.GradientTape() as g: x = constant_op.constant([[1.], [2.]]) g.watch(x) y = op(x) with self.assertRaisesRegexp(ValueError, 'No converter'): g.batch_jacobian(y, x, experimental_use_pfor=True) def test_parallel_iterations(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant([[1., 2], [3, 4]]) g.watch(x) w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]]) y = math_ops.matmul(x, w) self.assertAllClose(g.batch_jacobian(y, x, parallel_iterations=2), g.batch_jacobian(y, x, parallel_iterations=3)) @parameterized.parameters( (True, True), (True, False), (False, True), (False, False)) def test_degenerate_shape(self, use_function, use_pfor): def f(x): with backprop.GradientTape(persistent=True) as tape: tape.watch(x) y = x**2 return tape.batch_jacobian(y, x, experimental_use_pfor=use_pfor) if use_function: f = def_function.function(f) self.assertAllEqual([1, 0, 0], array_ops.shape(f(array_ops.zeros([1, 0])))) class AggregateIndexedSlicesGradientsTest(test_util.TensorFlowTestCase): def _assert_indexed_slices_equal(self, left, right): self.assertAllEqual( self.evaluate(ops.convert_to_tensor(left)), self.evaluate(ops.convert_to_tensor(right))) def testNoGradients(self): self.assertIsNone(backprop.aggregate_indexed_slices_gradients([])) def testOneGradient(self): t = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) result = backprop.aggregate_indexed_slices_gradients([t]) self._assert_indexed_slices_equal(t, result) def testMultipleGradients(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], [7., 8.]])) total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]]) result = backprop.aggregate_indexed_slices_gradients([t0, t1]) self._assert_indexed_slices_equal(total, result) def testMultipleGradientsWithNones(self): t0 
= math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = math_ops._as_indexed_slices( constant_op.constant([[0., 0.], [5, 6], [7., 8.]])) t3 = None total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]]) result = backprop.aggregate_indexed_slices_gradients([t0, t1, t3]) self._assert_indexed_slices_equal(total, result) def testMixedTensorAndIndexedSlices(self): t0 = math_ops._as_indexed_slices( constant_op.constant([[1., 2.], [0, 0], [3., 4.]])) t1 = constant_op.constant([[0., 0.], [5, 6], [7., 8.]]) total = constant_op.constant([[1., 2.], [5, 6], [10., 12.]]) result = backprop.aggregate_indexed_slices_gradients([t0, t1]) self._assert_indexed_slices_equal(total, result) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/backprop_test.py
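A minimal sketch of the Jacobian computation the JacobianTest cases above exercise, written against the public tf.GradientTape API rather than the internal modules the test imports; it assumes TensorFlow 1.15 with eager execution enabled, and the tensor names are illustrative only:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

x = tf.constant([1., 2.])
y = tf.constant([3., 4.])
# A persistent tape allows jacobian() to be called more than once.
with tf.GradientTape(persistent=True) as g:
  g.watch(x)
  g.watch(y)
  z = x * x * y  # elementwise, so each Jacobian is diagonal

jac_x = g.jacobian(z, x)  # equals tf.linalg.diag(2. * x * y)
jac_y = g.jacobian(z, y)  # equals tf.linalg.diag(x * x)
del g  # release the persistent tape's resources when done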
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for lift_to_graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import def_function from tensorflow.python.eager import lift_to_graph from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops as framework_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.util import compat class LiftToGraphTest(test.TestCase): def testCaptureOrdering(self): v1 = resource_variable_ops.ResourceVariable(1.0) v2 = resource_variable_ops.ResourceVariable(2.0) v3 = resource_variable_ops.ResourceVariable(3.0) @def_function.function def fn(): return v1 + v2 + v3 concrete_fn = fn.get_concrete_function() original_captures = concrete_fn.graph.internal_captures outputs = concrete_fn.graph.outputs for _ in range(100): g = func_graph.FuncGraph('lifted') lift_to_graph.lift_to_graph( outputs, g, add_sources=True, handle_captures=True) lifted_captures = g.internal_captures self.assertLen(lifted_captures, 3) for original, lifted in zip(original_captures, lifted_captures): self.assertEqual(original.name, lifted.name) def testClassAttrsRemoved(self): """Tests that _class attrs (from colocate_with()) are removed.""" @def_function.function def fn(): two = constant_op.constant(2.0, name='two') ten = constant_op.constant(10.0, name='ten') twenty = math_ops.multiply(two, ten, name='twenty') three = constant_op.constant(3.0, name='three') with framework_ops.colocate_with(twenty): thirty = math_ops.multiply(three, ten, name='thirty') return ten, twenty, thirty concrete_fn = fn.get_concrete_function() self.assertItemsEqual( # Before lifting, 'fn' has colocation attrs. concrete_fn.graph.get_operation_by_name('thirty').colocation_groups(), [compat.as_bytes('loc:@twenty')]) thirty_out = concrete_fn.graph.outputs[2] g = func_graph.FuncGraph('lifted') lift_to_graph.lift_to_graph([thirty_out], g) # After lifting, colocation attrs are gone. ops = g.get_operations() self.assertItemsEqual([op.name for op in ops], ['three', 'ten', 'thirty', # Lifted from `fn` body. thirty_out.op.name]) # Wrapper for output. for op in ops: with self.assertRaises(ValueError): class_attr = op.get_attr('_class') # Expected not to exist. print('Unexpected class_attr', class_attr, 'on', op.name) self.assertItemsEqual(op.colocation_groups(), # Expect default self-ref. [compat.as_bytes('loc:@%s' % op.name)]) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/lift_to_graph_test.py
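A condensed sketch of the lifting flow that testCaptureOrdering drives, using the same internal modules; lift_to_graph is not public TF API in this branch, so treat the call pattern below as illustrative, not a stable interface:

from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import resource_variable_ops

ops.enable_eager_execution()

v = resource_variable_ops.ResourceVariable(1.0)

@def_function.function
def fn():
  return v + 1.0

concrete_fn = fn.get_concrete_function()
g = func_graph.FuncGraph('lifted')
# Copies the ops behind the outputs into `g`; handle_captures=True re-creates
# the variable capture as an internal capture of the new graph.
lift_to_graph.lift_to_graph(
    concrete_fn.graph.outputs, g, add_sources=True, handle_captures=True)
print([c.name for c in g.internal_captures])  # one placeholder per capture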
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.compiler.tests import xla_test from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import variables from tensorflow.python.platform import test class DefFunctionTests(xla_test.XLATestCase): def testVarInitializedInFunction(self): with self.test_scope(): v_holder = [] @def_function.function def add_var(x): if not v_holder: v = variables.Variable([1., 2.]) v_holder.append(v) already_initialized = variables.Variable(3.) with ops.init_scope(): already_initialized.assign(10.) v_holder.append(already_initialized) return v_holder[0] + v_holder[1] + x self.assertAllClose([13., 14.], add_var(constant_op.constant(2.))) if __name__ == "__main__": ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/def_function_xla_test.py
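The holder-list idiom in testVarInitializedInFunction above is the standard way to create a variable only on the first trace of a tf.function; a minimal sketch using the public API (names are illustrative), without the XLA test scope:

import tensorflow as tf

tf.compat.v1.enable_eager_execution()

v_holder = []

@tf.function
def add_var(x):
  # tf.function disallows creating new variables on every call, so the
  # variable is created once, on the first trace, and reused afterwards.
  if not v_holder:
    v_holder.append(tf.Variable([1., 2.]))
  return v_holder[0] + x

print(add_var(tf.constant(2.)))  # [3., 4.]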
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for testing tfe code.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops as _ops from tensorflow.python.platform import test as _test from tensorflow.python.platform.test import * # pylint: disable=wildcard-import # TODO(akshayka): Do away with this file. def main(argv=None): # pylint: disable=function-redefined _ops.enable_eager_execution() _test.main(argv)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/test.py
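Typical usage of this shim, as seen in the eager test files above: a test module imports it in place of tensorflow.python.platform.test so that test.main() enables eager execution before the runner starts. The module below is a hypothetical example, not a file from the repo:

from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op


class ExampleEagerTest(test.TestCase):  # TestCase comes via the wildcard import

  def testAdd(self):
    # Runs eagerly because test.main() called enable_eager_execution().
    self.assertAllEqual(constant_op.constant(1.) + 1., 2.)


if __name__ == '__main__':
  test.main()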
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import weakref from absl.testing import parameterized import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import forwardprop from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import gradient_checker_v2 from tensorflow.python.ops import math_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import test from tensorflow.python.util import nest _X11_35_DERIVATIVES = [ 1.1 ** 3.5, 3.5 * 1.1 ** 2.5, 3.5 * 2.5 * 1.1 ** 1.5, 3.5 * 2.5 * 1.5 * 1.1 ** 0.5] # TODO(allenl): Move this somewhere useful once forward gradients are stable. def _jvp(f, primals, tangents): """Compute the jacobian of `f` at `primals` multiplied by `tangents`.""" with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(primals, tangents) primals_out = f(*primals) return primals_out, acc.jvp(primals_out) def _jacfwd(f, primals): """Compute the jacobian of `f` at `primals` using forward-mode autodiff.""" jac_flat = [] flat_primals = nest.flatten(primals) tangent_mask = [array_ops.zeros_like(primal) for primal in flat_primals] for primal_index, primal in enumerate(flat_primals): primal_vector = array_ops.reshape(primal, [-1]) primal_vector_length = array_ops.size(primal_vector) jac_columns = [] for element_index in math_ops.range(primal_vector_length): mask = array_ops.one_hot(element_index, primal_vector_length) tangent_mask[primal_index] = array_ops.reshape(mask, array_ops.shape(primal)) jac_columns.append( nest.map_structure( functools.partial(array_ops.reshape, shape=[-1]), _jvp(f, primals, tangent_mask)[1])) jac_flat.append(array_ops.stack(jac_columns, axis=1)) tangent_mask[primal_index] = array_ops.zeros_like(primal) return nest.pack_sequence_as(primals, jac_flat) def _grad(f, argnums=0): """Return a function which computes the gradient of `f`.""" def _f(*params): with backprop.GradientTape() as tape: tape.watch(params) primals_out = f(*params) return tape.gradient( primals_out, params[argnums], unconnected_gradients=UnconnectedGradients.ZERO) return _f def _hvp(f, primals, tangents): """Compute a forward-over-back Hessian-vector product.""" return _jvp(_grad(f), primals, tangents)[1] def _test_gradients(testcase, f, primals, order, delta=1e-3, rtol=1e-2, atol=1e-6): """Tests forward/backward jacobians of `f`'s [0, `order`)-order gradients.""" if order < 1: raise ValueError( "`order` should be a positive 
integer, got '{}'.".format(order)) if order > 1: _test_gradients( testcase=testcase, f=_grad(f), primals=primals, order=order - 1, delta=delta, rtol=rtol, atol=atol) sym_jac_back, num_jac = gradient_checker_v2.compute_gradient( f, primals, delta=delta) testcase.assertAllClose(num_jac, sym_jac_back, rtol=rtol, atol=atol) # TODO(b/134972215): compute_gradient should use the definition of a Jacobian # matrix on Wikipedia, then this transpose can go away. sym_jac_fwd = nest.map_structure(array_ops.transpose, _jacfwd(f, primals)) testcase.assertAllClose(num_jac, sym_jac_fwd, rtol=rtol, atol=atol) # And the symbolic computations should be much closer. testcase.assertAllClose(sym_jac_back, sym_jac_fwd) class ForwardpropTest(test.TestCase, parameterized.TestCase): def testForwardGradientFunction(self): add_outputs = (constant_op.constant(4.),) vp, = forwardprop._forward_gradient( op_name="Add", attr_tuple=(), inputs=(constant_op.constant(1.), constant_op.constant(3.)), outputs=add_outputs, tangents=( constant_op.constant(1.), constant_op.constant(5.), )) self.assertAllClose(1. + 5., self.evaluate(vp)) mul_outputs = (constant_op.constant([20.]),) vp, = forwardprop._forward_gradient( op_name="Mul", attr_tuple=(), inputs=(constant_op.constant([4.]), constant_op.constant([5.])), outputs=mul_outputs, tangents=( constant_op.constant([2.]), constant_op.constant([3.]), )) self.assertAllClose([2. * 5. + 3. * 4.], self.evaluate(vp)) def testForwardGradientFunctionUsedByAccumulatorForOps(self): previous_fn = forwardprop._forward_gradient try: with forwardprop.ForwardGradientAccumulator() as acc: x = constant_op.constant(1.) acc.watch(x, 2.) y = x + x pywrap_tensorflow.TFE_Py_RegisterForwardGradientFunction( lambda *args, **kwargs: [constant_op.constant(-15.)]) z = x + x self.assertAllClose(4., acc.jvp(y)) self.assertAllClose(-15., acc.jvp(z)) finally: pywrap_tensorflow.TFE_Py_RegisterForwardGradientFunction(previous_fn) @test_util.assert_no_new_pyobjects_executing_eagerly def testFunctionCacheLimited(self): # Every time this test is executed, it will create a slightly larger Tensor # and push it through Add's gradient. Since we check for new pyobjects after # the warmup, retracing each time without cleaning up old traces fails the # test. It works because of experimental_relax_shapes. execution_count = getattr(self, "_execution_count", 0) self._execution_count = execution_count + 1 x = array_ops.zeros([execution_count]) with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(x, array_ops.ones_like(x)) y = x + x self.assertAllClose(2. * array_ops.ones_like(x), acc.jvp(y)) @test_util.assert_no_new_pyobjects_executing_eagerly def testMultipleWatchesAdd(self): x = constant_op.constant(-2.) with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(x, constant_op.constant(10.)) self.assertAllClose(10., acc.jvp(x)) acc.watch(x, constant_op.constant(11.)) self.assertAllClose(21., acc.jvp(x)) y = constant_op.constant(3.) * x self.assertAllClose(21., acc.jvp(x)) self.assertAllClose(21. * 3., acc.jvp(y)) @test_util.assert_no_new_pyobjects_executing_eagerly def testDeadTensorsJVPCleared(self): x = array_ops.ones([100]) x_weak = weakref.ref(x) grad_tensor = constant_op.constant(array_ops.zeros([100])) grad_tensor_weak = weakref.ref(grad_tensor) with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(x, grad_tensor) derived_tensor = constant_op.constant(2.) 
* x del grad_tensor self.assertAllClose(array_ops.zeros([100]), acc.jvp(x)) del x self.assertIsNone(x_weak()) self.assertIsNone(grad_tensor_weak()) derived_tensor_weak = weakref.ref(derived_tensor) derived_tensor_grad = acc.jvp(derived_tensor) derived_tensor_grad_weak = weakref.ref(derived_tensor_grad) del derived_tensor del derived_tensor_grad self.assertIsNone(derived_tensor_weak()) self.assertIsNone(derived_tensor_grad_weak()) @test_util.assert_no_new_pyobjects_executing_eagerly def testJVPManual(self): primal, tangent = _jvp(math_ops.sin, (constant_op.constant(0.1),), (constant_op.constant(0.2),)) self.assertAllClose(math_ops.sin(0.1), primal) self.assertAllClose(math_ops.cos(0.1) * 0.2, tangent) @test_util.assert_no_new_pyobjects_executing_eagerly def testNumericHigherOrder(self): def f(x): pointwise = math_ops.sin(x) * math_ops.tan(x) return math_ops.reduce_prod( pointwise + math_ops.reduce_sum(pointwise), axis=1) _test_gradients( self, f, [constant_op.constant([[2.0, 3.0], [1.0, 4.0]])], order=3) @test_util.assert_no_new_pyobjects_executing_eagerly def testCustomGradient(self): @custom_gradient.custom_gradient def f(x): def grad(dy): return dy * math_ops.cos(x) return np.sin(x.numpy()), grad _test_gradients(self, f, [constant_op.constant([1., 2.])], order=3) @test_util.assert_no_new_pyobjects_executing_eagerly def testCustomGradientRecomputeGrad(self): @custom_gradient.recompute_grad def f(x): return math_ops.reduce_prod(math_ops.tanh(x)**2) _test_gradients(self, f, [constant_op.constant([1.])], order=3) @parameterized.named_parameters( [("Order{}".format(order), order, expected) for order, expected in enumerate(_X11_35_DERIVATIVES)]) @test_util.assert_no_new_pyobjects_executing_eagerly def testHigherOrderPureForward(self, order, expected): def _forwardgrad(f): def _compute_forwardgrad(primal): tangent = constant_op.constant(1.) 
with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(primal, tangent) primal_out = f(primal) return acc.jvp(primal_out) return _compute_forwardgrad def _forward(x): return x ** 3.5 f = _forward primal = constant_op.constant(1.1) for _ in range(order): f = _forwardgrad(f) self.assertAllClose(expected, f(primal)) @parameterized.named_parameters( [("Function", def_function.function), ("NoFunction", lambda f: f)]) def testGradPureForward(self, decorator): @decorator def f(x): return x ** 3.5 primal = constant_op.constant(1.1) with forwardprop.ForwardGradientAccumulator() as outer_acc: outer_acc.watch(primal, constant_op.constant(1.)) with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(primal, constant_op.constant(1.)) primal_out = f(primal) inner_jvp = acc.jvp(primal_out) outer_jvp = outer_acc.jvp(inner_jvp) self.assertAllClose(1.1 ** 3.5, primal_out) self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp) self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp) self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out))) def testFunctionGradInFunctionPureForward(self): @def_function.function def take_gradients(): @def_function.function def f(x): return x ** 3.5 primal = constant_op.constant(1.1) with forwardprop.ForwardGradientAccumulator() as outer_acc: outer_acc.watch(primal, constant_op.constant(1.)) with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(primal, constant_op.constant(1.)) primal_out = f(primal) inner_jvp = acc.jvp(primal_out) outer_jvp = outer_acc.jvp(inner_jvp) self.assertIsNone(acc.jvp(outer_acc.jvp(primal_out))) return primal_out, inner_jvp, outer_jvp primal_out, inner_jvp, outer_jvp = take_gradients() self.assertAllClose(1.1 ** 3.5, primal_out) self.assertAllClose(3.5 * 1.1 ** 2.5, inner_jvp) self.assertAllClose(3.5 * 2.5 * 1.1 ** 1.5, outer_jvp) def testFunctionGrad(self): @def_function.function def f(x): return math_ops.reduce_prod(math_ops.tanh(x)**2) _test_gradients( self, f, [constant_op.constant([1., 2.])], order=3) @test_util.assert_no_new_pyobjects_executing_eagerly def testHVPMemory(self): def fun(x): return math_ops.reduce_prod(math_ops.tanh(x)**2) primals = constant_op.constant([1., 2., 3.]) tangents = constant_op.constant([3., 4., 5.]) _hvp(fun, (primals,), (tangents,)) @test_util.assert_no_new_pyobjects_executing_eagerly def testHVPCorrectness(self): def fun(x): return math_ops.reduce_prod(math_ops.tanh(x)**2) primals = constant_op.constant([1., 2., 3.]) tangents = constant_op.constant([3., 4., 5.]) forwardback_hvp_eager = _hvp(fun, (primals,), (tangents,)) forwardback_hvp_function = def_function.function(_hvp)(fun, (primals,), (tangents,)) with backprop.GradientTape(persistent=True) as g: g.watch(primals) with backprop.GradientTape() as gg: gg.watch(primals) out = fun(primals) grad = array_ops.unstack(gg.gradient(out, primals)) hessian = [] for i in range(3): hessian.append(g.gradient(grad[i], primals)) hessian = array_ops.stack(hessian, axis=0) backback_hvp = math_ops.tensordot(hessian, tangents, axes=1) self.assertAllClose(backback_hvp, forwardback_hvp_eager) self.assertAllClose(backback_hvp, forwardback_hvp_function) if __name__ == "__main__": # TODO(allenl): Also test with 1.x-style graph mode. ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/forwardprop_test.py
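A minimal sketch of the forward-mode primitive behind the _jvp helper, mirroring testJVPManual; ForwardGradientAccumulator is internal and experimental in this branch, so the interface shown here should not be relied on outside these tests:

from tensorflow.python.eager import forwardprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

ops.enable_eager_execution()

x = constant_op.constant(0.1)
with forwardprop.ForwardGradientAccumulator() as acc:
  acc.watch(x, constant_op.constant(0.2))  # seed tangent dx = 0.2
  y = math_ops.sin(x)
# Jacobian-vector product pushed forward through sin: cos(0.1) * 0.2.
print(acc.jvp(y))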
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for core.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import pickle import threading import numpy as np from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.compat import compat from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import def_function from tensorflow.python.eager import execute as execute_lib from tensorflow.python.eager import executor from tensorflow.python.eager import test from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import script_ops from tensorflow.python.ops import variables def execute(op_name, num_outputs, inputs, attrs=None): return execute_lib.execute( op_name, num_outputs, inputs, attrs, context.context()) def truncated_normal(shape): return execute( b'TruncatedNormal', 1, inputs=[shape], attrs=('dtype', dtypes.float32.as_datatype_enum, 'T', shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0] def current_device(): return constant_op.constant(1.).device def configure_virtual_cpus(): cpus = config.list_physical_devices('CPU') # Set 2 virtual CPUs config.set_virtual_device_configuration(cpus[0], [ context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration() ]) class TFETest(test_util.TensorFlowTestCase): def setUp(self): super(TFETest, self).setUp() configure_virtual_cpus() def _test_hashable(self, a, b, hashable): if hashable: self.assertIsInstance(b, collections.Hashable) self.assertLen(set([a, b]), 2) else: # TODO(gjn): Figure out how to make this work for tf.Tensor # self.assertNotIsInstance(b, collections.Hashable) with self.assertRaisesRegexp(TypeError, 'unhashable'): set([a, b]) def testEquality(self): default = ops.Tensor._USE_EQUALITY try: def _v1_check(a, b): self.assertEqual(a, a) self.assertIs(a, a) self.assertNotEqual(a, 1.0) self.assertIsNot(a, 1.0) self.assertNotEqual(a, b) self.assertIsNot(a, b) def _v2_check(a, b): self.assertEqual(a, a) self.assertIs(a, a) self.assertEqual(a, 1.0) self.assertIsNot(a, 1.0) self.assertEqual(a, b) self.assertIsNot(a, b) constant_a = constant_op.constant(1.0) constant_b = constant_op.constant(1.0) ops.disable_tensor_equality() self._test_hashable(constant_a, constant_b, True) _v1_check(constant_a, constant_b) ops.enable_tensor_equality() 
_v2_check(constant_a, constant_b) self._test_hashable(constant_a, constant_b, False) variable_a = variables.Variable(1.0) variable_b = variables.Variable(1.0) ops.disable_tensor_equality() _v1_check(variable_a, variable_b) self._test_hashable(variable_a, variable_b, True) ops.enable_tensor_equality() _v2_check(variable_a, variable_b) self._test_hashable(variable_a, variable_b, False) # We only test numpy behaviour in v2 mode since we'd like to match that. numpy_a = np.array(1.0) numpy_b = np.array(1.0) _v2_check(numpy_a, numpy_b) self._test_hashable(numpy_a, numpy_b, False) finally: if default: ops.enable_tensor_equality() else: ops.disable_tensor_equality() def testEqualityNan(self): default = ops.Tensor._USE_EQUALITY try: def _v1_check(a, b): self.assertEqual(a, a) self.assertIs(a, a) self.assertNotEqual(a, float('nan')) self.assertIsNot(a, float('nan')) self.assertNotEqual(a, b) self.assertIsNot(a, b) def _v2_check(a, b): self.assertNotEqual(a, a) self.assertIs(a, a) self.assertNotEqual(a, float('nan')) self.assertIsNot(a, float('nan')) self.assertNotEqual(a, b) self.assertIsNot(a, b) constant_a = constant_op.constant(float('nan')) constant_b = constant_op.constant(float('nan')) ops.disable_tensor_equality() self._test_hashable(constant_a, constant_b, True) _v1_check(constant_a, constant_b) ops.enable_tensor_equality() _v2_check(constant_a, constant_b) self._test_hashable(constant_a, constant_b, False) variable_a = variables.Variable(float('nan')) variable_b = variables.Variable(float('nan')) ops.disable_tensor_equality() _v1_check(variable_a, variable_b) self._test_hashable(variable_a, variable_b, True) ops.enable_tensor_equality() _v2_check(variable_a, variable_b) self._test_hashable(variable_a, variable_b, False) numpy_a = np.array(float('nan')) numpy_b = np.array(float('nan')) _v2_check(numpy_a, numpy_b) self._test_hashable(numpy_a, numpy_b, False) finally: if default: ops.enable_tensor_equality() else: ops.disable_tensor_equality() def testEqualityCompare(self): default = ops.Tensor._USE_EQUALITY try: tf_a = constant_op.constant([1, 2]) tf_b = constant_op.constant([1, 2]) tf_c = constant_op.constant([1, 1]) np_a = np.array([1, 2]) np_b = np.array([1, 2]) np_c = np.array([1, 1]) ops.disable_tensor_equality() # We don't do element-wise comparison self.assertNotEqual(tf_a, tf_b) self.assertNotEqual(tf_a, tf_c) # We can compare list of tensors self.assertEqual([tf_a, tf_b], [tf_a, tf_b]) self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b]) # We can compare existence in a list self.assertIn(tf_a, [tf_a, tf_b]) self.assertIn(tf_a, [tf_b, tf_a]) self.assertNotIn(tf_a, [tf_b, tf_c]) ops.enable_tensor_equality() # We do element-wise comparison but can't convert results array to bool with self.assertRaises(ValueError): bool(tf_a == tf_b) self.assertAllEqual(tf_a == tf_b, [True, True]) with self.assertRaises(ValueError): bool(tf_a == tf_c) self.assertAllEqual(tf_a == tf_c, [True, False]) self.assertNotAllEqual(tf_a, tf_c) with self.assertRaises(ValueError): bool(np_a == np_b) self.assertAllEqual(np_a == np_b, [True, True]) with self.assertRaises(ValueError): bool(np_a == np_c) self.assertAllEqual(np_a == np_c, [True, False]) self.assertNotAllEqual(np_a, np_c) # Warning even though we technically shouldn't be able to compare here, # since the id is the same both TF & numpy will handle lists with the same # value without raising an error self.assertEqual([tf_a, tf_b], [tf_a, tf_b]) with self.assertRaises(ValueError): bool([tf_a, tf_b] == [tf_b, tf_b]) self.assertEqual([np_a, np_b], [np_a, np_b]) 
with self.assertRaises(ValueError): bool([np_a, np_b] == [np_b, np_b]) # Similar to lists we shouldn't be able to do a `in` check such as # `if a in [a,b]`. However if `a` is the first element, it works due to # short circuiting self.assertIn(tf_a, [tf_a, tf_b]) with self.assertRaises(ValueError): bool(tf_a in [tf_b, tf_a]) with self.assertRaises(ValueError): bool(tf_a in [tf_b, tf_c]) self.assertIn(np_a, [np_a, np_b]) with self.assertRaises(ValueError): bool(np_a in [np_b, np_a]) with self.assertRaises(ValueError): bool(np_a in [np_b, np_c]) # rank 0 self.assertAllEqual( constant_op.constant(1) == constant_op.constant(1), True) self.assertAllEqual( constant_op.constant(1) == constant_op.constant(2), False) self.assertAllEqual(np.array(1) == np.array(1), True) self.assertAllEqual(np.array(1) == np.array(2), False) finally: if default: ops.enable_tensor_equality() else: ops.disable_tensor_equality() def testEqualityBroadcast(self): default = ops.Tensor._USE_EQUALITY try: tf_a = constant_op.constant([1, 1]) tf_b = constant_op.constant([1, 1]) tf_c = constant_op.constant([[1, 1], [1, 1]]) tf_d = constant_op.constant([[1, 2], [1, 2]]) tf_e = constant_op.constant([1, 1, 1]) np_a = np.array([1, 1]) np_b = np.array([1, 1]) np_c = np.array([[1, 1], [1, 1]]) np_d = np.array([[1, 2], [1, 2]]) np_e = np.array([1, 1, 1]) ops.disable_tensor_equality() # We don't do element-wise comparison self.assertNotEqual(tf_a, tf_b) self.assertNotEqual(tf_a, tf_c) self.assertNotEqual(tf_a, tf_d) ops.enable_tensor_equality() # We do element-wise comparison but can't convert results array to bool with self.assertRaises(ValueError): bool(tf_a == tf_b) self.assertAllEqual(tf_a == tf_b, [True, True]) with self.assertRaises(ValueError): bool(tf_a == tf_c) self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]]) with self.assertRaises(ValueError): bool(tf_a == tf_d) self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]]) if compat.forward_compatible(2019, 9, 25): self.assertFalse(bool(tf_a == tf_e)) self.assertTrue(bool(tf_a != tf_e)) self.assertNotAllEqual(tf_a, tf_e) else: with self.assertRaises(errors.InvalidArgumentError): bool(tf_a != tf_e) with self.assertRaises(ValueError): bool(np_a == np_b) self.assertAllEqual(np_a == np_b, [True, True]) with self.assertRaises(ValueError): bool(np_a == np_c) self.assertAllEqual(np_a == np_c, [[True, True], [True, True]]) self.assertAllEqual(np_a == np_d, [[True, False], [True, False]]) self.assertFalse(bool(np_a == np_e)) self.assertTrue(bool(np_a != np_e)) self.assertNotAllEqual(np_a, np_e) finally: if default: ops.enable_tensor_equality() else: ops.disable_tensor_equality() def testContext(self): ctx = context.Context() self.assertTrue(ctx.executing_eagerly()) self.assertEqual('', ctx.scope_name) ctx.scope_name = 'foo' self.assertEqual('foo', ctx.scope_name) self.assertEqual(context.SYNC, ctx.execution_mode) ctx.execution_mode = context.ASYNC self.assertEqual(context.ASYNC, ctx.execution_mode) ctx.execution_mode = context.SYNC self.assertEqual(context.SYNC, ctx.execution_mode) self.assertIsNone(ctx.summary_writer) ctx.summary_writer = 'mock' self.assertEqual('mock', ctx.summary_writer) self.assertIsNone(ctx.summary_recording) ctx.summary_recording = 'mock' self.assertEqual('mock', ctx.summary_recording) self.assertIsNone(ctx.summary_step) ctx.summary_step = 'mock' self.assertEqual('mock', ctx.summary_step) self.assertEqual('', ctx.device_name) self.assertEqual(ctx.device_name, ctx.device_spec.to_string()) with ctx.device('GPU:0'): 
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0', ctx.device_name) self.assertEqual(ctx.device_name, ctx.device_spec.to_string()) with ctx.device(None): self.assertEqual('', ctx.device_name) self.assertEqual(ctx.device_name, ctx.device_spec.to_string()) with ctx.device('CPU:0'): self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0', ctx.device_name) self.assertEqual(ctx.device_name, ctx.device_spec.to_string()) has_cpu_device = False for x in ctx.devices(): has_cpu_device = has_cpu_device or 'CPU' in x self.assertTrue(has_cpu_device) del ctx def testAsyncBasic(self): ctx = context.Context(execution_mode=context.ASYNC) ctx.ensure_initialized() has_cpu_device = False for x in ctx.devices(): has_cpu_device = has_cpu_device or 'CPU' in x self.assertTrue(has_cpu_device) del ctx def testMultiCpuPlacement(self): with ops.device('cpu:1'): x = constant_op.constant(1.0) y = array_ops.identity(x) self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1') self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0') @test_util.run_gpu_only def testShouldCopy(self): with ops.device('gpu:0'): x = constant_op.constant(1.0) y = array_ops.identity(x) # The value we're testing y.device against will depend on what the behavior # of not explicitly specifying a device in the context is. This behavior is # subject to change (for example, in the future we may want to use GPUs, if # available, when no device is explicitly provided) self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0') def testContextSwitchStackContainsEagerMode(self): # Eager execution has been enabled, and no other context switch has # occurred, so `context_switches` should contain exactly one entry. self.assertEqual(len(context.context().context_switches.stack), 1) switch = context.context().context_switches.stack[0] # The entry should log that eager mode was entered. self.assertIs(switch.enter_context_fn, context.eager_mode) # It is not possible to build a graph function when eager execution # is enabled; the stack entry should reflect this fact. self.assertFalse(switch.is_building_function) @test_util.run_gpu_only def testInt32GPU(self): with ops.device('gpu:0'): xent = nn_ops.sparse_softmax_cross_entropy_with_logits( logits=[[0.0, 0.0]], labels=[0]) self.assertAllClose(xent, [0.69314718]) def _runInThread(self, target, args): t = threading.Thread(target=target, args=args) try: t.start() t.join() except Exception as e: raise e # Test that different thread local values are initialized to the same values # in different threads. 
def testContextThreadLocalMembers(self): def get_context_values(ctx): return [ ctx.executing_eagerly(), ctx.scope_name, ctx.summary_writer, ctx.summary_recording, ctx.summary_step, ctx.device_name, ctx.num_gpus() ] def get_values(ctx, values): values.extend(get_context_values(ctx)) context_values = [] ctx = context.Context() self._runInThread(get_values, (ctx, context_values)) self.assertAllEqual(context_values, get_context_values(ctx)) @test_util.run_gpu_only def testContextConfig(self): ctx = context.Context(config=config_pb2.ConfigProto( device_count={'GPU': 0})) self.assertEquals(0, ctx.num_gpus()) def testPickle(self): tmp_dir = self.get_temp_dir() fname = os.path.join(tmp_dir, 't.pickle') with open(fname, 'wb') as f: t = constant_op.constant(10.0) pickle.dump(t, f) with open(fname, 'rb') as f: t = pickle.load(f) self.assertAllEqual(t.numpy(), 10.0) @test_util.run_gpu_only def testDevicePlacementEnforcesConsistency(self): cpu = context.device('cpu:0') gpu = context.device('gpu:0') cpu.__enter__() self.assertEndsWith(current_device(), 'CPU:0') gpu.__enter__() self.assertEndsWith(current_device(), 'GPU:0') with self.assertRaisesRegexp( RuntimeError, 'Exiting device scope without proper scope nesting'): cpu.__exit__() self.assertEndsWith(current_device(), 'GPU:0') gpu.__exit__() self.assertEndsWith(current_device(), 'CPU:0') @test_util.run_gpu_only def testReEntrant(self): cpu = context.device('cpu:0') gpu = context.device('gpu:0') with cpu: with gpu: with gpu: self.assertEndsWith(current_device(), 'GPU:0') self.assertEndsWith(current_device(), 'GPU:0') self.assertEndsWith(current_device(), 'CPU:0') with gpu: self.assertEndsWith(current_device(), 'GPU:0') @test_util.run_gpu_only def testTensorPlacement(self): x = constant_op.constant(1.).gpu() with context.device('gpu:0'): y = constant_op.constant(2.) # Add would fail if t2 were not on GPU result = execute( b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy() self.assertEqual(3, result) @test_util.run_gpu_only def testResourceTensorPlacement(self): with context.device('gpu:0'): v = resource_variable_ops.ResourceVariable(1.0) with context.device('cpu:0'): # Check that even though we specified the cpu device we'll run the read op # in the device where the handle is. self.assertAllEqual( gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0) @test_util.run_gpu_only def testCopyBetweenDevices(self): x = constant_op.constant([[1., 2.], [3., 4.]]) x = x.cpu() x = x.gpu() x = x.gpu() x = x.cpu() # Invalid device with self.assertRaises(RuntimeError): x.gpu(context.context().num_gpus() + 1) @test_util.run_gpu_only def testCopyBetweenDevicesAsync(self): with context.execution_mode(context.ASYNC): x = constant_op.constant([[1., 2.], [3., 4.]]) x = x.cpu() x = x.gpu() x = x.gpu() x = x.cpu() context.context().executor.wait() # Invalid device with self.assertRaises(RuntimeError): x.gpu(context.context().num_gpus() + 1) context.context().executor.wait() context.context().executor.clear_error() @test_util.run_gpu_only def testCopyScope(self): constant = constant_op.constant(1.0) with ops.device('gpu:0'): with context.device_policy(context.DEVICE_PLACEMENT_SILENT): c = constant + 1.0 self.assertAllEqual(c, 2.0) def testPyFunctionNullContext(self): def simple_fn(unused_handle): return 1. @def_function.function def test_fn(v): script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32) return 1. 
test_var = variables.Variable([2., 3.]) self.assertAllEqual(test_fn(test_var), 1.0) def testPyFunctionAsync(self): def simple_fn(v): one = constant_op.constant(1.) return v + one @def_function.function def test_fn(v): return script_ops.eager_py_func(simple_fn, [v], dtypes.float32) async_executor = executor.new_executor(enable_async=True) with context.executor_scope(async_executor): test_var = variables.Variable(2.) self.assertAllEqual(test_fn(test_var), 3.0) async_executor.wait() @test_util.run_gpu_only def testNumpyForceCPU(self): cpu = constant_op.constant([[1., 2.], [3., 4.]]) c2g = cpu.gpu() self.assertAllEqual(c2g, cpu.numpy()) def testCopyFromCPUToCPU(self): ta = constant_op.constant([[1, 2], [3, 4]]) tb = ta.cpu() self.assertNotEqual(id(ta), id(tb)) self.assertAllEqual(ta, tb.numpy()) def testRegisterExceptionClass(self): with self.assertRaises(TypeError): pywrap_tensorflow.TFE_Py_RegisterExceptionClass(str) pywrap_tensorflow.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access # TODO(agarwal): add tests passing incorrect typed values to attrs. def testExecuteBasic(self): three = constant_op.constant(3) five = constant_op.constant(5) product = execute( b'Mul', num_outputs=1, inputs=[three, five], attrs=('T', three.dtype.as_datatype_enum))[0] self.assertAllEqual(15, product) def testExecuteBasicAsync(self): with context.execution_mode(context.ASYNC): three = constant_op.constant(3) five = constant_op.constant(5) product = execute( b'Mul', num_outputs=1, inputs=[three, five], attrs=('T', three.dtype.as_datatype_enum))[0] self.assertAllEqual(15, product) # Error: Invalid arguments context.set_execution_mode(context.ASYNC) with self.assertRaises(errors.InvalidArgumentError): execute( b'MatMul', num_outputs=1, inputs=[three, five], attrs=('transpose_a', False, 'transpose_b', False, 'T', three.dtype.as_datatype_enum)) context.context().executor.wait() context.context().executor.clear_error() context.context().execution_mode = context.SYNC def testExecuteTooManyNumOutputs(self): # num_outputs provided is 50, but only one output is produced. product = execute( b'Mul', num_outputs=50, inputs=[constant_op.constant(3), constant_op.constant(5)], attrs=('T', dtypes.int32.as_datatype_enum))[0] self.assertAllEqual(15, product) def testExecuteTooFewNumOutputs(self): # num_outputs provided is 0, but one output is produced. 
with self.assertRaises(errors.InvalidArgumentError): _ = execute( b'Mul', num_outputs=0, inputs=[constant_op.constant(3), constant_op.constant(5)], attrs=('T', dtypes.int32.as_datatype_enum))[0] @test_util.run_gpu_only def testMatMulGPU(self): three = constant_op.constant([[3.]]).gpu() five = constant_op.constant([[5.]]).gpu() product = execute( b'MatMul', num_outputs=1, inputs=[three, five], attrs=('transpose_a', False, 'transpose_b', False, 'T', three.dtype.as_datatype_enum))[0] self.assertAllEqual([[15.0]], product) def testExecuteStringAttr(self): checked_three = execute( b'CheckNumerics', num_outputs=1, inputs=[constant_op.constant(3.)], attrs=('message', 'just checking', 'T', dtypes.float32.as_datatype_enum))[0] self.assertEqual([[3]], checked_three.numpy()) def testExecuteStringAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): _ = execute( b'CheckNumerics', num_outputs=1, inputs=[constant_op.constant(3.)], attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum)) def testExecuteFloatAttr(self): almost_equal = execute( b'ApproximateEqual', num_outputs=1, inputs=[constant_op.constant(3.0), constant_op.constant(2.9)], attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0] self.assertTrue(almost_equal) def testExecuteFloatAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): _ = execute( b'ApproximateEqual', num_outputs=1, inputs=[constant_op.constant(3.0), constant_op.constant(2.9)], attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum)) def testExecuteIntAttr(self): total = execute( b'AddN', num_outputs=1, inputs=[constant_op.constant(3), constant_op.constant(4)], attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0] self.assertAllEqual(7, total) def testExecuteIntAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): _ = execute( b'AddN', num_outputs=1, inputs=[constant_op.constant(3), constant_op.constant(4)], attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2')) # Looks like we don't have an existing op with list(bool) attrs. 
def testExecuteBoolAttr(self): product = execute( b'MatMul', num_outputs=1, inputs=[constant_op.constant([[3]]), constant_op.constant([[5]])], attrs=('transpose_a', True, 'transpose_b', False, 'T', dtypes.int32.as_datatype_enum))[0] self.assertAllEqual([[15]], product) def testExecuteShapeAttr(self): execute( b'VarHandleOp', num_outputs=1, inputs=[], attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum, 'container', '', 'shared_name', '')) def testExecuteShapeAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'VarHandleOp', num_outputs=1, inputs=[], attrs=('shape', 1, 'dtype', dtypes.int32.as_datatype_enum, 'container', '', 'shared_name', '')) def testExecuteListStringAttr(self): execute( b'TensorSummary', num_outputs=1, inputs=[constant_op.constant(3.0)], attrs=('T', dtypes.float32.as_datatype_enum, 'description', 'tensor_summary', 'labels', ['3', 'summary'], 'display_name', 'test')) def testExecuteListStringAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'TensorSummary', num_outputs=1, inputs=[constant_op.constant(3.0)], attrs=('T', dtypes.float32.as_datatype_enum, 'description', '', 'labels', 3, 'display_name', 'test')) def testExecuteListStringAttrBadListValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'TensorSummary', num_outputs=1, inputs=[constant_op.constant(3.0)], attrs=('T', dtypes.float32.as_datatype_enum, 'description', '', 'labels', [3], 'display_name', 'test')) def testExecuteListFloatAttr(self): b = execute( b'Bucketize', num_outputs=1, inputs=[constant_op.constant([3.0, 5.0, 7.0])], attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0, 6.0]))[0] self.assertAllEqual([0, 1, 2], b) def testExecuteListFloatAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Bucketize', num_outputs=1, inputs=[constant_op.constant([3.0, 5.0, 7.0])], attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0)) def testExecuteListFloatAttrBadListValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Bucketize', num_outputs=1, inputs=[constant_op.constant([3.0, 5.0, 7.0])], attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', ['4.0', '6.0'])) def testExecuteListIntAttr(self): b = execute( b'Squeeze', num_outputs=1, inputs=[constant_op.constant([[[3.0]]])], attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0] self.assertAllEqual([3], b) def testExecuteListIntAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Squeeze', num_outputs=1, inputs=[constant_op.constant([[[3.0]]])], attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0)) def testExecuteListIntAttrBadListValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Squeeze', num_outputs=1, inputs=[constant_op.constant([[[3.0]]])], attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', ['0', '2'])) def testExecuteListTypeListShapeAttr(self): execute( b'Barrier', num_outputs=1, inputs=[], attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes', [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', '')) def testExecuteListTypeAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Barrier', num_outputs=1, inputs=[], attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes', [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', '')) def testExecuteListTypeAttrBadListValue(self): with self.assertRaises(errors.InvalidArgumentError): 
execute( b'Barrier', num_outputs=1, inputs=[], attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1, 'container', '', 'shared_name', '')) def testExecuteListShapeAttrBadValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Barrier', num_outputs=1, inputs=[], attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes', [1, 2], 'capacity', -1, 'container', '', 'shared_name', '')) def testExecuteListShapeAttrBadListValue(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Barrier', num_outputs=1, inputs=[], attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes', [1], 'capacity', -1, 'container', '', 'shared_name', '')) def testExecuteMultipleOutputs(self): split_dim = 1 value = [[0, 1, 2], [3, 4, 5]] x1, x2, x3 = execute( b'Split', num_outputs=3, inputs=[constant_op.constant(split_dim), constant_op.constant(value)], attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum)) self.assertAllEqual([[0], [3]], x1) self.assertAllEqual([[1], [4]], x2) self.assertAllEqual([[2], [5]], x3) def testExecuteBadNumOutputsArgument(self): with self.assertRaises(TypeError): execute( b'Relu', [], inputs=[constant_op.constant(3.0)], attrs=('T', dtypes.float32.as_datatype_enum)) def testExecuteUnknownOp(self): with self.assertRaises(errors.NotFoundError): execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None) def testExecuteUnknownAttr(self): with self.assertRaises(errors.InvalidArgumentError): execute( b'Identity', num_outputs=1, inputs=[constant_op.constant(3)], attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah')) def testComposition(self): def add(x, y): return execute( b'Add', num_outputs=1, inputs=[x, y], attrs=('T', dtypes.int32.as_datatype_enum))[0] x = constant_op.constant(1) three_x = add(add(x, x), x) self.assertEquals(dtypes.int32, three_x.dtype) self.assertAllEqual(3, three_x) @test_util.run_gpu_only def testOperationWithNoInputsRunsOnDevice(self): shape = constant_op.constant([], dtype=dtypes.int32) # x: Run the "TruncatedNormal" op CPU and copy result to GPU. x = truncated_normal(shape).gpu() # y: Explicitly run the "TruncatedNormal" op on GPU. with context.device('gpu:0'): y = truncated_normal(shape) # Add would fail if x and y were not on the same device. execute( b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum)) def testInvalidDevice(self): with self.assertRaises(ValueError): with context.device('pu:0'): _ = constant_op.constant(1) def testConvertMixedEagerTensors(self): array = np.zeros((), dtype=np.float32) tensor = constant_op.constant(0., dtype=dtypes.float32) types, tensors = execute_lib.convert_to_mixed_eager_tensors( [array, tensor], context.context()) for typ, t in zip(types, tensors): self.assertEquals(typ, dtypes.float32) self.assertIsInstance(t, ops.EagerTensor) def testConvertMixedEagerTensorsWithVariables(self): var = resource_variable_ops.ResourceVariable(1.0) types, tensors = execute_lib.convert_to_mixed_eager_tensors( ['foo', var], context.context()) self.assertAllEqual([dtypes.string, dtypes.float32], types) for t in tensors: self.assertIsInstance(t, ops.EagerTensor) # TODO(b/123637108): re-enable @test_util.run_gpu_only def disabled_testSmallIntegerOpsForcedToCPU(self): a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64) b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64) with context.device('gpu:0'): c = a + b # Op forced to CPU since all constants are integers and small. 
self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:CPU:0') a = array_ops.zeros((8, 10), dtype=dtypes.int64) b = array_ops.ones((8, 10), dtype=dtypes.int64) with context.device('gpu:0'): c = a + b # Op not forced to CPU since the tensors are larger than 64 elements. self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0') a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32) b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32) with context.device('gpu:0'): c = a + b # Op not forced to CPU since the constants are not integers. self.assertEqual(c.device, '/job:localhost/replica:0/task:0/device:GPU:0') def testExecutionModeIsStoredThreadLocal(self): cv = threading.Condition() count = [0] num_threads = 10 def execution_mode_test(cond, count, num_threads, ctx, mode): cond.acquire() # Ensure that all threads set their mode simultaneously # Note that this is not a simple assignment, as the execution_mode is an # @property with a custom setter. ctx.execution_mode = mode count[0] = count[0] + 1 if count[0] < num_threads: cond.wait() else: cond.notify_all() cond.release() self.assertEqual(ctx.execution_mode, mode) ctx = context.Context() threads = [] for i in range(num_threads): t = threading.Thread( target=execution_mode_test, args=(cv, count, num_threads, ctx, context.SYNC if i % 2 == 0 else context.ASYNC)) t.start() threads.append(t) for t in threads: t.join() class SendRecvTest(test_util.TensorFlowTestCase): cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0' def _send(self, tensor, tensor_name, to_device): return execute( b'_Send', num_outputs=0, inputs=[tensor], attrs=('T', tensor.dtype.as_datatype_enum, 'tensor_name', tensor_name, 'send_device', tensor.device, 'send_device_incarnation', 0, 'recv_device', to_device, 'client_terminated', True)) def _recv(self, dtype, tensor_name, from_device): device_name = context.context().device_name if not device_name: device_name = self.cpu_device return execute( b'_Recv', num_outputs=1, inputs=[], attrs=('tensor_type', dtype.as_datatype_enum, 'tensor_name', tensor_name, 'send_device', from_device, 'send_device_incarnation', 0, 'recv_device', device_name, 'client_terminated', False))[0] def setUp(self): super(SendRecvTest, self).setUp() configure_virtual_cpus() def testBasic(self): t0 = constant_op.constant(1.0) t1 = constant_op.constant(2.0) self._send(t0, 't0', self.cpu_device) self._send(t1, 't1', self.cpu_device) self.assertAllEqual( self._recv(dtypes.float32, 't0', self.cpu_device), 1.0) self.assertAllEqual( self._recv(dtypes.float32, 't1', self.cpu_device), 2.0) @test_util.run_gpu_only def testLocalCrossDevice(self): gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0' with ops.device('GPU:0'): t0 = constant_op.constant(1.0) self._send(t0, 't0', self.cpu_device) with ops.device('cpu:0'): self.assertAllEqual( self._recv(dtypes.float32, 't0', gpu_device_name), 1.0) self._send(constant_op.constant(2.0), 't1', gpu_device_name) with ops.device('GPU:0'): self.assertAllEqual( self._recv(dtypes.float32, 't1', self.cpu_device), 2.0) class EagerTensorCacheTest(test_util.TensorFlowTestCase): def setUp(self): super(EagerTensorCacheTest, self).setUp() configure_virtual_cpus() def testCacheSkipsTensorsTooLarge(self): cache = context._EagerTensorCache(max_items=100, max_tensor_size=3) cache.put('1', array_ops.zeros((2, 2))) self.assertIsNone(cache.get('1')) cache.put('2', array_ops.zeros((2))) self.assertIsNotNone(cache.get('2')) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/core_test.py
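The core_test.py record above drives ops through the low-level eager `execute` path, where `attrs` is a flat tuple of alternating attribute names and values and dtypes are passed as `as_datatype_enum` integers. A minimal standalone sketch of that calling convention, assuming `execute_lib.execute` (the function the test module wraps) and an eager context; the trivial Add op is illustrative, not taken from the file:

from tensorflow.python.eager import context
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops

ops.enable_eager_execution()  # TF 1.x: the eager execute path needs an eager context.
ctx = context.context()
ctx.ensure_initialized()

# 'Add' has one output; the 'T' attr carries the element dtype as an enum integer.
total = execute_lib.execute(
    b'Add',
    num_outputs=1,
    inputs=[constant_op.constant(1), constant_op.constant(2)],
    attrs=('T', dtypes.int32.as_datatype_enum),
    ctx=ctx)[0]
print(total.numpy())  # -> 3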
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for eager profiler.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.protobuf import trace_events_pb2 from tensorflow.python.eager import profiler from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.platform import gfile class ProfilerTest(test_util.TensorFlowTestCase): def test_profile(self): profiler.start() three = constant_op.constant(3) five = constant_op.constant(5) product = three * five self.assertAllEqual(15, product) with self.assertRaises(profiler.ProfilerAlreadyRunningError): profiler.start() profile_result = profiler.stop() profile_pb = trace_events_pb2.Trace() profile_pb.ParseFromString(profile_result) profile_pb_str = '%s' % profile_pb self.assertTrue('Mul' in profile_pb_str) with self.assertRaises(profiler.ProfilerNotRunningError): profiler.stop() def test_save_profile(self): logdir = self.get_temp_dir() profile_pb = trace_events_pb2.Trace() profile_result = profile_pb.SerializeToString() profiler.save(logdir, profile_result) file_list = gfile.ListDirectory(logdir) self.assertEqual(len(file_list), 2) for file_name in gfile.ListDirectory(logdir): if gfile.IsDirectory(os.path.join(logdir, file_name)): self.assertEqual(file_name, 'plugins') else: self.assertTrue(file_name.endswith('.profile-empty')) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/profiler_test.py
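A compact sketch of the profiler flow profiler_test.py verifies: `start()` begins tracing, `stop()` returns serialized `trace_events_pb2.Trace` bytes, and `save()` lays out a TensorBoard-readable log directory (a `plugins/` subdirectory plus a `*.profile-empty` marker file). Assumes an eager context; the temp directory is illustrative:

import tempfile

from tensorflow.core.protobuf import trace_events_pb2
from tensorflow.python.eager import profiler
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

ops.enable_eager_execution()  # TF 1.x: run the traced ops eagerly.

profiler.start()
_ = constant_op.constant(3) * constant_op.constant(5)  # traced Mul op
trace_bytes = profiler.stop()

# The returned bytes parse directly into the Trace proto.
trace = trace_events_pb2.Trace()
trace.ParseFromString(trace_bytes)

logdir = tempfile.mkdtemp()
profiler.save(logdir, trace_bytes)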
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for low-level eager execution primitives.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python import keras class Tests(test.TestCase): @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_MatMulCorrectResponse(self): a_2_by_2 = random_ops.random_uniform((2, 2)) b_2_by_2 = random_ops.random_uniform((2, 2)) a_100_by_784 = random_ops.random_uniform((100, 784)) b_100_by_784 = random_ops.random_uniform((100, 784)) ctx = context.context() ctx.ensure_initialized() self.assertAllClose( math_ops.matmul(a_2_by_2, b_2_by_2), pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, b_2_by_2, "transpose_a", False, "transpose_b", False)) self.assertAllClose( math_ops.matmul(a_100_by_784, b_100_by_784, transpose_b=True), pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, a_100_by_784, b_100_by_784, "transpose_a", False, "transpose_b", True)) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_ResourceVariableMatMulCorrectResponse(self): ctx = context.context() ctx.ensure_initialized() a_2_by_2 = constant_op.constant(1.0, shape=[2, 2]) m = resource_variable_ops.ResourceVariable(a_2_by_2) x = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a", False, "transpose_b", False) y = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, a_2_by_2, "transpose_a", False, "transpose_b", False) self.assertAllEqual(x, y) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_TapeWrite(self): ctx = context.context() ctx.ensure_initialized() with backprop.GradientTape(persistent=True) as tape: a_2_by_2 = constant_op.constant(1.0, shape=[2, 2]) tape.watch(a_2_by_2) z = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2, a_2_by_2, "transpose_a", False, "transpose_b", False) dz_dy = tape.gradient(z, [a_2_by_2])[0] self.assertAllEqual(dz_dy.numpy(), 
constant_op.constant(4.0, shape=[2, 2]).numpy()) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_ResourceVariableTapeWrite(self): ctx = context.context() ctx.ensure_initialized() with backprop.GradientTape(persistent=True) as tape: a_2_by_2 = constant_op.constant(1.0, shape=[2, 2]) m = resource_variable_ops.ResourceVariable(a_2_by_2) tape.watch(m) z = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a", False, "transpose_b", False) dz_dy = tape.gradient(z, [m])[0] self.assertAllEqual(dz_dy.numpy(), constant_op.constant(4.0, shape=[2, 2]).numpy()) # Tests homogeneous list op @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_AddNCorrectResponse(self): ctx = context.context() ctx.ensure_initialized() a_2_by_2 = random_ops.random_uniform((2, 2)) b_2_by_2 = random_ops.random_uniform((2, 2)) self.assertAllClose( math_ops.add_n([a_2_by_2, b_2_by_2]), pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name, "AddN", None, None, [a_2_by_2, b_2_by_2])) # Tests homogeneous list op @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_AddNTapeWrite(self): ctx = context.context() ctx.ensure_initialized() a_2_by_2 = random_ops.random_uniform((2, 2)) b_2_by_2 = random_ops.random_uniform((2, 2)) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) tape.watch(b_2_by_2) z1 = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "AddN", None, None, [a_2_by_2, b_2_by_2]) z2 = math_ops.add_n([a_2_by_2, b_2_by_2]) dz1_dy = tape.gradient(z1, [a_2_by_2])[0] dz2_dy = tape.gradient(z2, [a_2_by_2])[0] self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy()) # Tests heterogeneous list op @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_IdentityNCorrectResponse(self): ctx = context.context() ctx.ensure_initialized() a_2_by_2 = random_ops.random_uniform((2, 2)) b_2_by_2 = random_ops.random_uniform((2, 2)) self.assertAllClose( array_ops.identity_n([a_2_by_2, b_2_by_2]), pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name, "IdentityN", None, None, [a_2_by_2, b_2_by_2])) # Tests heterogeneous list op @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_IdentityNTapeWrite(self): ctx = context.context() ctx.ensure_initialized() a_2_by_2 = random_ops.random_uniform((2, 2)) b_2_by_2 = random_ops.random_uniform((2, 2)) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) tape.watch(b_2_by_2) z1 = pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "IdentityN", None, None, [a_2_by_2, b_2_by_2]) z2 = array_ops.identity_n([a_2_by_2, b_2_by_2]) dz1_dy = tape.gradient(z1[0], [a_2_by_2])[0] dz2_dy = tape.gradient(z2[0], [a_2_by_2])[0] self.assertAllEqual(dz1_dy.numpy(), dz2_dy.numpy()) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastpathExecute_InvalidInputs(self): a_2_by_2 = random_ops.random_uniform((2, 2)) ctx = context.context() ctx.ensure_initialized() assert ctx.executing_eagerly( ), "The prototype doesn't contain C code for graph construction" ctx_handle = ctx._handle # pylint: disable=protected-access # Not enough base params with self.assertRaisesRegexp(ValueError, "at least 5 items in the input tuple"): pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, "Identity") # Not enough inputs with 
self.assertRaisesRegexp(ValueError, "Expected to be at least 6, was 5"): pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx_handle, "Identity", None, []) # Bad type with self.assertRaisesRegexp(TypeError, "expected a string for op_name"): pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, ctx_handle, None, [], a_2_by_2) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testFastPathExecute_InvalidAttributes(self): split_dim = constant_op.constant(0, dtype=dtypes.int32) value = constant_op.constant([0, 1, 2, 3], dtype=dtypes.float32) ctx = context.context() ctx.ensure_initialized() ctx_handle = ctx._handle with self.assertRaises(core._FallbackException): pywrap_tensorflow.TFE_Py_FastPathExecute(ctx_handle, ctx.device_name, "Split", None, None, split_dim, value, "num_split", -1) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testInvalidNumOutputs(self): with self.assertRaisesRegexp( Exception, "Value for attr 'num_split' of -1 must be at least minimum 1"): array_ops.split(value=[1, 2, 3], num_or_size_splits=-1) with self.assertRaisesRegexp( Exception, "Value for attr 'num_split' of 0 must be at least minimum 1"): array_ops.split(value=[1, 2, 3], num_or_size_splits=0) def testIsFunction(self): ctx = context.context() self.assertFalse(ctx.has_function("not_a_function")) @def_function.function def f(): return 1. self.assertTrue(ctx.has_function(f.get_concrete_function().name)) def testEagerExecute_InvalidType(self): # Test case for GitHub issue 26879. value = keras.layers.Input((128, 128, 1), dtype="float32") with self.assertRaisesRegexp(TypeError, "Expected list for 'values' argument"): _ = array_ops.stack(value, axis=1) def testGraphResourceVariableRaisesFallback(self): with ops.Graph().as_default(): a_2_by_2 = constant_op.constant(1.0, shape=[2, 2]) m = resource_variable_ops.ResourceVariable(a_2_by_2) ctx = context.context() ctx.ensure_initialized() with self.assertRaises(core._FallbackException): pywrap_tensorflow.TFE_Py_FastPathExecute(ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a", False, "transpose_b", False) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/pywrap_tfe_test.py
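pywrap_tfe_test.py above pins down the fast-path calling convention: positional context handle, device name, op name, then an optional op name override and post-execution callbacks (both `None` here), followed by the inputs and alternating attr name/value pairs; `core._FallbackException` signals that the caller should retry on the regular execute path. A hedged sketch of that contract (these are private APIs and may change; the MatMul inputs are illustrative):

from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops

ops.enable_eager_execution()
ctx = context.context()
ctx.ensure_initialized()

a = random_ops.random_uniform((2, 2))
b = random_ops.random_uniform((2, 2))
try:
  # ctx._handle is private; the tests use it the same way.
  product = pywrap_tensorflow.TFE_Py_FastPathExecute(
      ctx._handle, ctx.device_name, "MatMul", None, None,
      a, b, "transpose_a", False, "transpose_b", False)
except core._FallbackException:
  product = math_ops.matmul(a, b)  # slower but fully general path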
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for remote execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import remote from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variables from tensorflow.python.training import server_lib class SingleWorkerTest(test.TestCase): def setUp(self): super(SingleWorkerTest, self).setUp() workers, _ = test_util.create_local_cluster(1, 0) remote.connect_to_remote_host(workers[0].target) def testMultiDeviceFunctionBasic(self): @def_function.function def basic(i): with ops.device('/job:localhost/replica:0/task:0/cpu:0'): a = constant_op.constant([2]) + i with ops.device('/job:worker/replica:0/task:0/cpu:0'): b = constant_op.constant([1]) return a + b self.assertAllEqual(basic(constant_op.constant([2])).numpy(), [5]) self.assertAllEqual(basic(constant_op.constant([1])).numpy(), [4]) def testMultiDeviceFunctionVariable(self): with ops.device('/job:worker/replica:0/task:0/cpu:0'): variable_b = variables.Variable(1) @def_function.function def with_variable(i): return i + variable_b self.assertAllEqual(with_variable(constant_op.constant([2])).numpy(), [3]) def testMultiDeviceFunctionRemoteOutput(self): with ops.device('/job:worker/replica:0/task:0/cpu:0'): variable_b = variables.Variable(1) @def_function.function def remote_output(i): return variable_b, i + variable_b with self.assertRaises(errors.UnimplementedError) as cm: remote_output(constant_op.constant([1])) self.assertIn( 'Currently, outputting tensors on remote devices is not supported.', cm.exception.message) def testMultiDeviceFunctionAmbiguousDevice(self): @def_function.function def ambiguous_device(i): with ops.device('cpu:0'): return i + constant_op.constant([2]) with self.assertRaises(errors.InvalidArgumentError) as cm: with ops.device('/job:worker/replica:0/task:0/cpu:0'): ambiguous_device(constant_op.constant([2])).numpy() self.assertIn('the output node must match exactly one device', cm.exception.message) def testStreaming(self): """A mini stress test for streaming - issuing many RPCs back to back.""" with ops.device('job:worker/replica:0/task:0/device:CPU:0'): x = array_ops.ones([2, 2]) y = array_ops.zeros([2, 2]) num_iters = 200 for _ in range(num_iters): y = x + y # Ask for y's shape after every 10 additions on average. # This exercises waiting for remote shape logic in TensorHandle. 
if random.randint(1, 10) == 1: _ = y.shape np.testing.assert_array_equal( [[num_iters, num_iters], [num_iters, num_iters]], y.numpy()) class MultiWorkersTest(test.TestCase): def setUp(self): super(MultiWorkersTest, self).setUp() workers, _ = test_util.create_local_cluster(3, 0) remote.connect_to_remote_host( [workers[0].target, workers[1].target, workers[2].target]) def testMultiDeviceFunctionOnLocalDevice(self): with ops.device('/job:worker/replica:0/task:1'): variable_b = variables.Variable(1.0) @def_function.function def remote_function(i): with ops.device('/job:worker/replica:0/task:0'): a = i + variable_b c = a + 1.0 return c self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) def testMultiDeviceFunctionOnRemoteDevice(self): with ops.device('/job:worker/replica:0/task:1'): variable_b = variables.Variable(1.0) @def_function.function def remote_function(i): with ops.device('/job:worker/replica:0/task:0'): a = i + variable_b c = a + 1.0 return c context.context().mirroring_policy = context.MIRRORING_NONE with ops.device('/job:worker/replica:0/task:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) if test_util.is_gpu_available(): with ops.device('/job:worker/replica:0/task:0/device:GPU:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) context.context().mirroring_policy = context.MIRRORING_ALL with ops.device('/job:worker/replica:0/task:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) if test_util.is_gpu_available(): with ops.device('/job:worker/replica:0/task:0/device:GPU:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) def testMultiDeviceWhileLoopOnRemoteDevice(self): with ops.device('/job:worker/replica:0/task:1'): variable_b = variables.Variable(1.0) @def_function.function def remote_function(i): def body(i, _): with ops.device('/job:worker/replica:0/task:0'): a = i + variable_b return a + 1.0, 1 return control_flow_ops.while_loop_v2(lambda _, d: d < 1, body, [i, 0])[0] context.context().mirroring_policy = context.MIRRORING_NONE with ops.device('/job:worker/replica:0/task:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) if test_util.is_gpu_available(): with ops.device('/job:worker/replica:0/task:0/device:GPU:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) context.context().mirroring_policy = context.MIRRORING_ALL with ops.device('/job:worker/replica:0/task:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) if test_util.is_gpu_available(): with ops.device('/job:worker/replica:0/task:0/device:GPU:0'): self.assertAllEqual(remote_function(constant_op.constant([1.0])), [3.0]) def testSimpleParameterServer(self): with ops.device('/job:worker/task:2/device:CPU:0'): v1 = variables.Variable(initial_value=0) v2 = variables.Variable(initial_value=10) @def_function.function def worker_fn(): v1.assign_add(1) v2.assign_sub(2) return v1.read_value() + v2.read_value() with ops.device('/job:worker/task:0/device:CPU:0'): self.assertAllEqual(worker_fn(), 9) with ops.device('/job:worker/task:1/device:CPU:0'): self.assertAllEqual(worker_fn(), 8) _GRPC_PREFIX = 'grpc://' class MultiJobsTest(test.TestCase): def setUp(self): super(MultiJobsTest, self).setUp() workers, ps = test_util.create_local_cluster(2, 1) cluster = { 'my_worker': [ _strip_prefix(workers[0].target, _GRPC_PREFIX), _strip_prefix(workers[1].target, _GRPC_PREFIX), ], 'my_ps': [_strip_prefix(ps[0].target, _GRPC_PREFIX)], } 
remote.connect_to_cluster(server_lib.ClusterSpec(cluster)) def testSimpleParameterServer(self): with ops.device('/job:my_ps/task:0/device:CPU:0'): v1 = variables.Variable(initial_value=0) v2 = variables.Variable(initial_value=10) @def_function.function def worker_fn(): v1.assign_add(1) v2.assign_sub(2) return v1.read_value() + v2.read_value() with ops.device('/job:my_worker/task:0/device:CPU:0'): self.assertAllEqual(worker_fn(), 9) with ops.device('/job:my_worker/task:1/device:CPU:0'): self.assertAllEqual(worker_fn(), 8) def _strip_prefix(s, prefix): return s[len(prefix):] if s.startswith(prefix) else s if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/remote_test.py
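remote_test.py above shows the eager remote-execution setup: start (or point at) a cluster, connect the current eager context to it, then place work on remote tasks with `ops.device()` inside a `tf.function`. A minimal sketch under the same assumptions the tests make (an in-process test cluster from `test_util.create_local_cluster`, eager mode enabled, and the default `worker` job name):

from tensorflow.python.eager import def_function
from tensorflow.python.eager import remote
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util

ops.enable_eager_execution()
workers, _ = test_util.create_local_cluster(1, 0)  # one worker, no ps tasks
remote.connect_to_remote_host(workers[0].target)

@def_function.function
def add_on_worker(x):
  # Runs on the remote worker; the result is fetched back to the client.
  with ops.device('/job:worker/replica:0/task:0/cpu:0'):
    return x + constant_op.constant([1])

print(add_on_worker(constant_op.constant([2])).numpy())  # -> [3]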
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """State management for eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import copy import random import threading import numpy as np import six from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python import tf2 from tensorflow.python.eager import executor from tensorflow.python.eager import monitoring from tensorflow.python.framework import c_api_util from tensorflow.python.framework import device as pydev from tensorflow.python.util import compat from tensorflow.python.util import is_in_graph_mode from tensorflow.python.util import tf_contextlib from tensorflow.python.util.tf_export import tf_export GRAPH_MODE = 0 EAGER_MODE = 1 default_execution_mode = EAGER_MODE if tf2.enabled() else GRAPH_MODE # Cache from (old_device_name, partial_new_device_name) -> (new_device_name, # new_device_spec). # Note that we do not protect this with a lock and instead rely on python's GIL # and the idempotent nature of writes to provide thread safety. _device_parsing_cache = {} _starting_device_spec = pydev.DeviceSpec.from_string("") _MAXINT32 = 2**31 - 1 DEVICE_PLACEMENT_EXPLICIT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_EXPLICIT DEVICE_PLACEMENT_WARN = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_WARN DEVICE_PLACEMENT_SILENT = pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT DEVICE_PLACEMENT_SILENT_FOR_INT32 = ( pywrap_tensorflow.TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32) SYNC = 0 ASYNC = 1 MIRRORING_NONE = pywrap_tensorflow.TFE_MIRRORING_NONE MIRRORING_ALL = pywrap_tensorflow.TFE_MIRRORING_ALL _python_eager_context_create_counter = monitoring.Counter( "/tensorflow/api/python/eager_context_create_counter", "Counter for number of eager contexts created in Python.") class _EagerTensorCache(object): """Simple cache which evicts items based on length in a FIFO manner.""" def __init__(self, max_items=256, max_tensor_size=10000): self._data = collections.OrderedDict() self._max_items = max_items self._max_tensor_size = max_tensor_size def put(self, key, value): if value._num_elements() > self._max_tensor_size: # pylint: disable=protected-access return self._data[key] = value if len(self._data) > self._max_items: self._data.popitem(last=False) def get(self, key): return self._data.get(key, None) def flush(self): self._data = {} class FunctionCallOptions(object): """Options applied at call sites of eager functions. Eager functions are functions decorated with tf.contrib.eager.defun. """ def __init__(self, executor_type=None, config_proto=None): """Constructor. Args: executor_type: (optional) name of the executor to be used to execute the eager function. 
If None or an empty string, the default Tensorflow executor will be used. config_proto: (optional) a `config_pb2.ConfigProto` proto or a serialized string of that proto. The config used by Grappler when optimizing the function graph. Each concrete function is optimized the first time is called. Changing config_proto after the first call has no effect. If config_proto is None, an empty RewriterConfig will be used. """ self.config_proto_serialized = config_proto self.executor_type = executor_type @property def executor_type(self): return self._executor_type @executor_type.setter def executor_type(self, executor_type): self._executor_type = executor_type @property def config_proto_serialized(self): return self._config_proto_serialized @config_proto_serialized.setter def config_proto_serialized(self, config): if isinstance(config, config_pb2.ConfigProto): self._config_proto_serialized = config.SerializeToString() elif isinstance(config, str): self._config_proto_serialized = config elif config is None: self._config_proto_serialized = ( config_pb2.ConfigProto().SerializeToString()) else: raise ValueError("the rewriter config must be either a " "config_pb2.ConfigProto, or a serialized string of that " "proto or None. got: {}".format(type(config))) # Map from context_id (an int) to _TensorCaches. # Dicts are thread safe in CPython. # TODO(iga): Remove this once TensorCaches are moved to C++. _tensor_caches_map = {} class _TensorCaches(threading.local): """Thread local tensor caches.""" def __init__(self): super(_TensorCaches, self).__init__() self._ones_rank_cache = None self._zeros_cache = None @property def ones_rank_cache(self): if not self._ones_rank_cache: self._ones_rank_cache = _EagerTensorCache() return self._ones_rank_cache @property def zeros_cache(self): if not self._zeros_cache: self._zeros_cache = _EagerTensorCache() return self._zeros_cache class _ThreadLocalData(threading.local): """Thread local storage for the eager context.""" def __init__(self): super(_ThreadLocalData, self).__init__() self.device_spec = _starting_device_spec self.device_name = "" self.mode = default_execution_mode self.is_eager = default_execution_mode == EAGER_MODE self.scope_name = "" self.summary_writer = None self.summary_recording = None self.summary_recording_distribution_strategy = True self.summary_step = None self.function_call_options = None self.executor = None ContextSwitch = collections.namedtuple( "ContextSwitch", ["is_building_function", "enter_context_fn", "device_stack"]) # `_ContextSwitchStack` is a `threading.local` to match the semantics of # ``DefaultGraphStack`, which is also a `threading.local`. class _ContextSwitchStack(threading.local): """A thread-local stack of context switches.""" def __init__(self, eager): super(_ContextSwitchStack, self).__init__() self.stack = [] if eager: # Initialize the stack with a pointer to enter the eager context; this # ensures that the fact that eager execution was enabled is propagated # across threads, since (1) `enable_eager_execution` modifies a # process-level flag (`default_execution_mode`) and (2) `__init__` is # called each time a threading.local object is used in a separate thread. self.push(is_building_function=False, enter_context_fn=eager_mode, device_stack=None) def push(self, is_building_function, enter_context_fn, device_stack): """Push metadata about a context switch onto the stack. A context switch can take any one of the two forms: installing a graph as the default graph, or entering the eager context. 
For each context switch, we record whether or not the entered context is building a function. Args: is_building_function: (bool.) Whether the context is building a function. enter_context_fn: (function.) A callable that executes the context switch. For example, `graph.as_default` or `eager_mode`. device_stack: If applicable, the device function stack for this graph. When breaking out of graphs in init_scope, the innermost nonempty device stack is used. Eager contexts put `None` here and the value is never used. """ self.stack.append( ContextSwitch(is_building_function, enter_context_fn, device_stack)) def pop(self): """Pop the stack.""" self.stack.pop() class LogicalDevice( collections.namedtuple("LogicalDevice", ["name", "device_type"])): """Abstraction for a device initialized by the runtime. A LogicalDevice corresponds to a initialized instance on a PhysicalDevice or a remote device available in the cluster. Tensors and operations can be placed on a specific LogicalDevice by calling `tf.device()` with the `name` of the LogicalDevice. Fields: name: The fully qualified name of the device. Can be used for Op or function placement. device_type: String declaring the type of device such as "CPU" or "GPU". """ pass @tf_export("config.experimental.VirtualDeviceConfiguration") class VirtualDeviceConfiguration( collections.namedtuple("VirtualDeviceConfiguration", ["memory_limit"])): """Configuration class for virtual devices for a PhysicalDevice. Fields: memory_limit: (optional) Maximum memory (in MB) to allocate on the virtual device. Currently only supported for GPUs. """ def __new__(cls, memory_limit=None): return super(VirtualDeviceConfiguration, cls).__new__(cls, memory_limit) class PhysicalDevice( collections.namedtuple("PhysicalDevice", ["name", "device_type"])): """Abstraction for a locally visible physical device. TensorFlow can utilize various devices such as the CPU or multiple GPUs for computation. Before initializing a local device for use, the user can customize certain properties of the device such as it's visibility or memory configuration. Once a PhysicalDevice is initialized one or many LogicalDevice objects are created. Use tf.config.set_virtual_device_configuration() to create multiple LogicalDevice objects for a PhysicalDevice. This is useful when separation between models is needed. Fields: name: Unique identifier for device. device_type: String declaring the type of device such as "CPU" or "GPU". """ pass class _AtomicCounter(object): """A simple atomic counter.""" def __init__(self): self._value = 0 self._lock = threading.Lock() def increment_and_get(self): with self._lock: self._value += 1 return self._value _context_id_counter = _AtomicCounter() class _TensorCacheDeleter(object): """Deletes tensor caches for a given context.""" def __init__(self, context_id): self._context_id = context_id def __del__(self): if _tensor_caches_map is None: return if self._context_id in _tensor_caches_map: del _tensor_caches_map[self._context_id] # Thread-local stack of execution callbacks. _post_execution_callbacks = threading.local() # TODO(agarwal): rename to EagerContext / EagerRuntime ? # TODO(agarwal): consider keeping the corresponding Graph here. class Context(object): """Environment in which eager operations execute.""" # TODO(agarwal): create and link in some documentation for `execution_mode`. # pylint: disable=redefined-outer-name def __init__(self, config=None, device_policy=None, execution_mode=None, server_def=None): """Creates a new Context. Args: config: (Optional.) 
A `ConfigProto` protocol buffer with configuration options for the Context. Note that a lot of these options may be currently unimplemented or irrelevant when eager execution is enabled. device_policy: (Optional.) What policy to use when trying to run an operation on a device with inputs which are not on that device. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Defaults to DEVICE_PLACEMENT_SILENT. Valid values: - DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not correct. - DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right device but raises a warning. - DEVICE_PLACEMENT_SILENT: silently copies the tensors. This might hide performance problems. - DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, raising errors on the other ones. execution_mode: (Optional.) Policy controlling how operations dispatched are actually executed. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - SYNC: executes each operation synchronously. - ASYNC: executes each operation asynchronously. These operations may return "non-ready" handles. server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on remote devices. GrpcServers need to be started by creating an identical server_def to this, and setting the appropriate task_indexes, so that the servers can communicate. It will then be possible to execute operations on remote devices. Raises: ValueError: If execution_mode is not valid. """ # This _id is used only to index the tensor caches. # TODO(iga): Remove this when tensor caches are moved to C++. self._id = _context_id_counter.increment_and_get() self._tensor_cache_deleter = _TensorCacheDeleter(self._id) _tensor_caches_map[self._id] = _TensorCaches() self._config = config self._thread_local_data = _ThreadLocalData() self._context_switches = _ContextSwitchStack(self.executing_eagerly()) self._context_handle = None self._context_devices = None self._seed = None self._initialize_lock = threading.Lock() self._initialized = False if device_policy is None: device_policy = DEVICE_PLACEMENT_SILENT self._device_policy = device_policy self._mirroring_policy = None if execution_mode not in (None, SYNC, ASYNC): raise ValueError( "execution_mode should be None/SYNC/ASYNC. Got %s" % execution_mode) if execution_mode is None: execution_mode = SYNC self._default_is_async = execution_mode == ASYNC self._server_def = server_def self._collective_ops_server_def = None self._collective_leader = None self._collective_scoped_allocator_enabled_ops = None self._collective_use_nccl_communication = None self._collective_device_filters = None self._device_lock = threading.Lock() self._physical_devices = None self._visible_device_list = [] self._memory_growth_map = None self._virtual_device_map = {} # Values set after construction self._optimizer_jit = None self._intra_op_parallelism_threads = None self._inter_op_parallelism_threads = None self._soft_device_placement = None self._log_device_placement = None self._optimizer_experimental_options = {} _python_eager_context_create_counter.get_cell().increase_by(1) # pylint: enable=redefined-outer-name def _set_global_seed(self, seed): """Set a global eager mode seed for random ops.""" self._seed = seed # `random.Random(seed)` needs `seed` to be hashable, while values of type # e.g. `np.int64` or `np.ndarray` are not. We use `int(...)` to convert them # to int. 
try: hash(seed) except TypeError: seed = int(np.array(seed)) self._rng = random.Random(seed) # Also clear the kernel cache, to reset any existing seeds if self._context_handle is not None: pywrap_tensorflow.TFE_ContextClearCaches(self._context_handle) def _internal_operation_seed(self): """Returns a fake operation seed. In eager mode, user shouldn't set or depend on operation seed. Here, we generate a random seed based on global seed to make operation's randomness different and depend on the global seed. Returns: A fake operation seed based on global seed. """ return self._rng.randint(0, _MAXINT32) def _initialize_logical_devices(self): """Helper to initialize devices.""" # Store list of devices self._logical_devices = [] self._context_devices = [] device_list = pywrap_tensorflow.TFE_ContextListDevices( self._context_handle) try: self._num_gpus = 0 for i in range(pywrap_tensorflow.TF_DeviceListCount(device_list)): dev_name = pywrap_tensorflow.TF_DeviceListName(device_list, i) self._context_devices.append(pydev.canonical_name(dev_name)) spec = pydev.DeviceSpec.from_string(dev_name) self._logical_devices.append( LogicalDevice(name=dev_name, device_type=spec.device_type)) dev_type = pywrap_tensorflow.TF_DeviceListType(device_list, i) if dev_type == "GPU": self._num_gpus += 1 finally: pywrap_tensorflow.TF_DeleteDeviceList(device_list) def ensure_initialized(self): """Initialize handle and devices if not already done so.""" if self._initialized: return with self._initialize_lock: if self._initialized: return assert self._context_devices is None opts = pywrap_tensorflow.TFE_NewContextOptions() try: config_str = self.config.SerializeToString() pywrap_tensorflow.TFE_ContextOptionsSetConfig(opts, config_str) if self._device_policy is not None: pywrap_tensorflow.TFE_ContextOptionsSetDevicePlacementPolicy( opts, self._device_policy) if self._mirroring_policy is not None: pywrap_tensorflow.TFE_ContextOptionsSetMirroringPolicy( opts, self._mirroring_policy) if self._default_is_async == ASYNC: pywrap_tensorflow.TFE_ContextOptionsSetAsync(opts, True) context_handle = pywrap_tensorflow.TFE_NewContext(opts) finally: pywrap_tensorflow.TFE_DeleteContextOptions(opts) assert not (self._server_def and self._collective_ops_server_def), ( "Cannot enable remote execution as well as collective ops at the " "moment. If this is important to you, please file an issue.") if self._server_def is not None: server_def_str = self._server_def.SerializeToString() pywrap_tensorflow.TFE_ContextSetServerDef(context_handle, 600, server_def_str) elif self._collective_ops_server_def is not None: server_def_str = self._collective_ops_server_def.SerializeToString() pywrap_tensorflow.TFE_EnableCollectiveOps(context_handle, server_def_str) self._context_handle = context_handle self._initialize_logical_devices() self._initialized = True def _clear_caches(self): self.ones_rank_cache().flush() self.zeros_cache().flush() pywrap_tensorflow.TFE_ClearScalarCache() def set_server_def(self, server_def, keep_alive_secs=600): """Allow setting a server_def on the context. When a server def is replaced, it effectively clears a bunch of caches within the context. If you attempt to use a tensor object that was pointing to a tensor on the remote device, it will raise an error. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. keep_alive_secs: Num. seconds after which the remote end will hang up. As long as the client is still alive, the server state for the context will be kept alive. 
If the client is killed (or there is some failure), the server will clean up its context keep_alive_secs after the final RPC it receives. Raises: ValueError: if server_def is None. """ if not server_def: raise ValueError("server_def is None.") self._server_def = server_def if self._context_handle: server_def_str = server_def.SerializeToString() pywrap_tensorflow.TFE_ContextSetServerDef(self._context_handle, keep_alive_secs, server_def_str) self._initialize_logical_devices() # Clear all the caches in case there are remote tensors in them. self._clear_caches() def enable_collective_ops(self, server_def): """Enable distributed collective ops with an appropriate server_def. Args: server_def: A tensorflow::ServerDef proto. Enables execution on remote devices. Raises: ValueError: if server_def is None. RuntimeError: if this method is not called at program startup. """ if not server_def: raise ValueError("server_def is None.") if self._context_handle is not None: raise RuntimeError("Collective ops must be enabled at program startup") self._collective_ops_server_def = server_def def configure_collective_ops( self, collective_leader="", scoped_allocator_enabled_ops=("CollectiveReduce",), use_nccl_communication=False, device_filters=None): """Configure collective ops. Collective group leader is necessary for collective ops to run, other configurations are mainly for the purpose of performance. Args: collective_leader: a device string for collective leader, e.g. "/job:worker/replica:0/task:"; empty string means local execution of collective ops. scoped_allocator_enabled_ops: a tuple or a list of op names for scoped allocator to run with. use_nccl_communication: whether to use nccl communication for collective ops. device_filters: a tuple or a list of device strings. If set, corresponding task can only see the devices filtered by these device filters. Raises: RuntimeError: if this method is not called at program startup. """ if self._collective_leader is not None: if (self._collective_leader != collective_leader or self._collective_scoped_allocator_enabled_ops != scoped_allocator_enabled_ops or self._collective_use_nccl_communication != use_nccl_communication or self._collective_device_filters != device_filters): raise ValueError("Collective ops are already configured.") else: return if self._context_handle is not None: raise RuntimeError("Collective ops must be configured at program startup") self._collective_leader = collective_leader self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops self._collective_use_nccl_communication = use_nccl_communication self._collective_device_filters = device_filters @property def _handle(self): if self._context_handle is None: raise AssertionError("Context must be initialized first.") return self._context_handle @property def _devices(self): if self._context_devices is None: raise AssertionError("Context must be initialized first.") return self._context_devices def __str__(self): if self._context_handle is None: return "Eager TensorFlow Context. Devices currently uninitialized." 
else: devices = self._devices lines = ["Eager TensorFlow Context with %d devices" % (len(devices))] for i, d in enumerate(devices): lines.append(" Device %d: %s" % (i, d)) return "\n".join(lines) @tf_contextlib.contextmanager def _mode(self, mode): """A context manager to allow setting the mode to EAGER/GRAPH.""" ctx = self._thread_local_data old_mode = ctx.mode old_is_eager = ctx.is_eager ctx.mode = mode ctx.is_eager = mode == EAGER_MODE if mode == EAGER_MODE: # Entering graph mode does not provide us with sufficient information to # record a context switch; graph-based context switches are only logged # when a graph is registered as the default graph. self.context_switches.push(False, eager_mode, None) try: yield finally: ctx.is_eager = old_is_eager ctx.mode = old_mode if mode == EAGER_MODE: self.context_switches.pop() def executing_eagerly(self): """Returns True if current thread has eager executing enabled.""" return self._thread_local_data.is_eager def ones_rank_cache(self): """Per-device cache for scalars.""" return _tensor_caches_map[self._id].ones_rank_cache def zeros_cache(self): """Per-device cache for scalars.""" return _tensor_caches_map[self._id].zeros_cache @property def scope_name(self): """Returns scope name for the current thread.""" return self._thread_local_data.scope_name @scope_name.setter def scope_name(self, s): """Sets scope name for the current thread.""" self._thread_local_data.scope_name = s @property def summary_writer(self): """Returns default summary writer for the current thread.""" return self._thread_local_data.summary_writer @summary_writer.setter def summary_writer(self, writer): """Sets default summary writer for the current thread.""" self._thread_local_data.summary_writer = writer @property def summary_recording(self): """Returns summary recording condition.""" return self._thread_local_data.summary_recording @summary_recording.setter def summary_recording(self, condition): """Sets summary recording condition.""" self._thread_local_data.summary_recording = condition @property def summary_recording_distribution_strategy(self): """Returns summary recording condition for distribution strategy.""" return self._thread_local_data.summary_recording_distribution_strategy @summary_recording_distribution_strategy.setter def summary_recording_distribution_strategy(self, condition): """Sets summary recording condition for distribution strategy.""" self._thread_local_data.summary_recording_distribution_strategy = condition @property def summary_step(self): """Returns summary step variable.""" return self._thread_local_data.summary_step @summary_step.setter def summary_step(self, step): """Sets summary step variable.""" self._thread_local_data.summary_step = step @property def device_name(self): """Returns the device name for the current thread.""" return self._thread_local_data.device_name @property def device_spec(self): """Returns the device spec for the current thread.""" return self._thread_local_data.device_spec def _set_device(self, device_name, device_spec): self._thread_local_data.device_name = device_name self._thread_local_data.device_spec = device_spec def device(self, name): """Context-manager to force placement of operations and Tensors on a device. Args: name: Name of the device or None to get default placement. Returns: Context manager that forces device placement. Raises: ValueError: If name is not a string or is an invalid device name. RuntimeError: If device scopes are not properly nested. 
""" return _EagerDeviceContext(self, name) def devices(self): """List of the names of devices available to execute operations.""" return self._devices # TODO(fishx): remove this property. @property def execution_mode(self): """Gets execution mode for current thread.""" return ASYNC if self.is_async() else SYNC @execution_mode.setter def execution_mode(self, mode): """Sets execution mode for current thread.""" if mode not in (None, SYNC, ASYNC): raise ValueError( "Execution mode should be None/SYNC/ASYNC. Got %s" % mode) if mode is None: mode = SYNC enable_async = (mode == ASYNC) if self.is_async() != enable_async: # Only set the execution mode if the context has already been initialized if self._context_handle is not None: self.executor.wait() executor_new = executor.new_executor(enable_async) self._thread_local_data.executor = executor_new pywrap_tensorflow.TFE_ContextSetExecutorForThread( self._context_handle, executor_new.handle()) else: self._default_is_async = enable_async def is_async(self): if self._context_handle is not None: return self.executor.is_async() else: return self._default_is_async @property def executor(self): ensure_initialized() return executor.Executor( pywrap_tensorflow.TFE_ContextGetExecutorForThread(self._context_handle)) @executor.setter def executor(self, e): ensure_initialized() pywrap_tensorflow.TFE_ContextSetExecutorForThread(self._context_handle, e.handle()) @property def config(self): """Return the ConfigProto with all runtime deltas applied.""" # Ensure physical devices have been discovered and config has been imported self._initialize_physical_devices() config = config_pb2.ConfigProto() if self._config is not None: config.CopyFrom(self._config) if self._optimizer_jit is not None: config.graph_options.optimizer_options.global_jit_level = ( config_pb2.OptimizerOptions.ON_1 if self._optimizer_jit else config_pb2.OptimizerOptions.OFF) if self._intra_op_parallelism_threads is not None: config.intra_op_parallelism_threads = self._intra_op_parallelism_threads if self._inter_op_parallelism_threads is not None: config.inter_op_parallelism_threads = self._inter_op_parallelism_threads if self._soft_device_placement is not None: config.allow_soft_placement = self._soft_device_placement else: config.allow_soft_placement = self.executing_eagerly() if self._log_device_placement is not None: config.log_device_placement = self._log_device_placement def rewriter_toggle(option): toggle = self._optimizer_experimental_options.get(option, None) if toggle is None: return setattr(config.graph_options.rewrite_options, option, (rewriter_config_pb2.RewriterConfig.ON if toggle else rewriter_config_pb2.RewriterConfig.OFF)) def rewriter_bool(option): toggle = self._optimizer_experimental_options.get(option, None) if toggle is None: return setattr(config.graph_options.rewrite_options, option, toggle) rewriter_toggle("layout_optimizer") rewriter_toggle("constant_folding") rewriter_toggle("shape_optimization") rewriter_toggle("remapping") rewriter_toggle("arithmetic_optimization") rewriter_toggle("dependency_optimization") rewriter_toggle("loop_optimization") rewriter_toggle("function_optimization") rewriter_toggle("debug_stripper") rewriter_bool("disable_model_pruning") rewriter_toggle("scoped_allocator_optimization") rewriter_toggle("pin_to_host_optimization") rewriter_toggle("implementation_selector") rewriter_toggle("auto_mixed_precision") rewriter_bool("disable_meta_optimizer") nodes = self._optimizer_experimental_options.get("min_graph_nodes", None) if nodes is not None: 
config.graph_options.rewrite_options.min_graph_nodes = nodes # Compute device counts config.device_count["CPU"] = 0 config.device_count["GPU"] = 0 for dev in self._physical_devices: if dev not in self._visible_device_list: continue virtual_devices = self._virtual_device_map.get(dev) if virtual_devices is None: config.device_count[dev.device_type] += 1 else: config.device_count[dev.device_type] += len(virtual_devices) # Configure gpu_options gpu_options = self._compute_gpu_options() config.gpu_options.MergeFrom(gpu_options) # Configure collective ops if self._collective_leader: config.experimental.collective_group_leader = self._collective_leader if self._collective_scoped_allocator_enabled_ops: rewrite_options = config.graph_options.rewrite_options rewrite_options.scoped_allocator_optimization = ( rewriter_config_pb2.RewriterConfig.ON) del rewrite_options.scoped_allocator_opts.enable_op[:] for op in self._collective_scoped_allocator_enabled_ops: rewrite_options.scoped_allocator_opts.enable_op.append(op) if self._collective_use_nccl_communication: config.experimental.collective_nccl = True if self._collective_device_filters: del config.device_filters[:] for f in self._collective_device_filters: config.device_filters.append(f) return config def _compute_gpu_options(self): """Build the GPUOptions proto.""" visible_device_list = [] virtual_devices = [] gpu_index = -1 memory_growths = set() for dev in self.list_physical_devices("GPU"): gpu_index += 1 if dev not in self._visible_device_list: continue growth = self._memory_growth_map[dev] memory_growths.add(growth) visible_device_list.append(str(gpu_index)) if self._virtual_device_map: vdevs = self._virtual_device_map.get(dev, []) device_limits = [] for virt_dev in vdevs: device_limits.append(virt_dev.memory_limit) virtual_devices.append( config_pb2.GPUOptions.Experimental.VirtualDevices( memory_limit_mb=device_limits)) # Only compute growth if virtual devices have not been configured and we # have GPUs if not virtual_devices and memory_growths: if len(memory_growths) > 1: raise ValueError("Memory growth cannot differ between GPU devices") allow_growth = memory_growths.pop() else: allow_growth = None return config_pb2.GPUOptions( allow_growth=allow_growth, visible_device_list=",".join(visible_device_list), experimental=config_pb2.GPUOptions.Experimental( virtual_devices=virtual_devices)) @property def function_call_options(self): """Returns function call options for current thread. Note that the returned object is still referenced by the eager context. Returns: the FunctionCallOptions for current thread. """ if self._thread_local_data.function_call_options is None: config = self.config # Default to soft placement for functions unless specified if self._soft_device_placement is None: config.allow_soft_placement = True self._thread_local_data.function_call_options = FunctionCallOptions( config_proto=config) return self._thread_local_data.function_call_options @function_call_options.setter def function_call_options(self, options): """Returns function call options for current thread.""" self._thread_local_data.function_call_options = options def num_gpus(self): """The number of GPUs available to execute operations.""" self.ensure_initialized() return self._num_gpus def add_function(self, fn): """Add a function definition to the context. Once added, the function (identified by its name) can be executed like any other operation. Args: fn: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper). 
""" self.ensure_initialized() pywrap_tensorflow.TFE_ContextAddFunction(self._handle, fn) def add_function_def(self, fdef): """Add a function definition to the context. Once added, the function (identified by its name) can be executed like any other operation. Args: fdef: A FunctionDef protocol buffer message. """ self.ensure_initialized() fdef_string = fdef.SerializeToString() pywrap_tensorflow.TFE_ContextAddFunctionDef( self._handle, fdef_string, len(fdef_string)) def remove_function(self, name): """Remove a function from the context. Once removed, the function cannot be executed anymore. Args: name: function signature name. """ self.ensure_initialized() pywrap_tensorflow.TFE_ContextRemoveFunction(self._handle, name) def has_function(self, name): """Check if a function `name` is registered.""" self.ensure_initialized() return bool(pywrap_tensorflow.TFE_ContextHasFunction(self._handle, name)) def add_post_execution_callback(self, callback): """Add a post-execution callback to the context. A post-execution callback is invoked immediately after an eager operation or function has finished execution, providing access to the op's type, name input and output tensors. Multiple execution callbacks can be added, in which case the callbacks will be invoked in the order in which they are added. Args: callback: a callable of the signature `f(op_type, op_name, attrs, inputs, outputs)`. `op_type` is the type of the operation that was just executed (e.g., `MatMul`). `op_name` is the name of the operation that has was just executed. This name is set by the client who created the operation and can be `None` if it is unset. `attrs` contains the attributes of the operation as a `tuple` of alternating attribute names and attribute values. `inputs` is the `list` of input `Tensor`(s) to the op. `outputs` is the `list` of output `Tensor`(s) from the op. Return value(s) from the callback are ignored. """ self.post_execution_callbacks.append(callback) def clear_post_execution_callbacks(self): """Clear all post-execution callbacks added to the context.""" del self.post_execution_callbacks[:] @property def post_execution_callbacks(self): """Get the list of post-execution callbacks added to the context.""" if not hasattr(_post_execution_callbacks, "callbacks"): _post_execution_callbacks.callbacks = [] return _post_execution_callbacks.callbacks def _initialize_physical_devices(self): """Get local devices visible to the system.""" # We lazy initialize self._physical_devices since we do not want to do this # the constructor since the backend may not be initialized yet. with self._device_lock: if self._physical_devices is not None: return devs = pywrap_tensorflow.TF_ListPhysicalDevices() self._physical_devices = [ PhysicalDevice(name=d.decode(), device_type=d.decode().split(":")[1]) for d in devs] # Construct the visible device list from all physical devices but ignore # XLA devices self._visible_device_list = [ d for d in self._physical_devices if not d.device_type.startswith("XLA") ] self._memory_growth_map = { d: None for d in self._physical_devices if d.device_type == "GPU" } # Import device settings that may have been passed into the constructor self._import_config() def list_physical_devices(self, device_type=None): """List local devices visible to the system. This API allows a client to query the devices before they have been initialized by the eager runtime. Additionally a user can filter by device type, to get only CPUs or GPUs. 
Args: device_type: Optional device type to limit results to. Returns: List of PhysicalDevice objects. """ self._initialize_physical_devices() if device_type is not None: return [ d for d in self._physical_devices if device_type == d.device_type ] return self._physical_devices def _import_config(self): """Import config if passed in during construction. If Context was created with a ConfigProto, such as when calling tf.compat.v1.enable_eager_execution(), then we need to pull out the various pieces we might be replacing and import them into our internal class representation. """ if self._config is None: return num_cpus = self._config.device_count.get("CPU", 1) if num_cpus != 1: cpus = [d for d in self._physical_devices if d.device_type == "CPU"] if num_cpus == 0: self.set_visible_devices([], "CPU") elif num_cpus > 1: self.set_virtual_device_configuration( cpus[0], [VirtualDeviceConfiguration() for _ in range(num_cpus)]) # Parse GPU options gpus = [d for d in self._physical_devices if d.device_type == "GPU"] # If there are no GPUs detected, simply ignore all the GPU options passed in # rather than doing any validation checks. if not gpus: return gpu_count = self._config.device_count.get("GPU", None) visible_gpus = [] # TODO(gjn): Handle importing existing virtual GPU configuration visible_indices = self._config.gpu_options.visible_device_list if visible_indices: for index in visible_indices.split(","): if int(index) >= len(gpus): raise ValueError("Invalid visible device index: %s" % index) visible_gpus.append(gpus[int(index)]) else: visible_gpus = gpus if gpu_count is not None: visible_gpus = visible_gpus[:gpu_count] self.set_visible_devices(visible_gpus, "GPU") def list_logical_devices(self, device_type=None): """Return logical devices.""" self.ensure_initialized() devices = [] for dev in self._logical_devices: if device_type is not None and device_type != dev.device_type: continue devices.append(dev) return devices def get_visible_devices(self, device_type=None): """Get the list of visible devices.""" self._initialize_physical_devices() if device_type is None: return self._visible_device_list else: return [ d for d in self._visible_device_list if d.device_type == device_type ] def set_visible_devices(self, devices, device_type=None): """Set the list of visible devices.""" self._initialize_physical_devices() if not isinstance(devices, list): devices = [devices] for d in devices: if d not in self._physical_devices: raise ValueError("Unrecognized device: %s" % repr(d)) if device_type is not None and d.device_type != device_type: raise ValueError("Unrecognized device: %s" % repr(d)) visible_device_list = [] if device_type is not None: visible_device_list = [ d for d in self._visible_device_list if d.device_type != device_type ] visible_device_list += devices if self._visible_device_list == visible_device_list: return if self._context_handle is not None: raise RuntimeError( "Visible devices cannot be modified after being initialized") self._visible_device_list = visible_device_list def get_memory_growth(self, dev): """Get if memory growth is enabled for a PhysicalDevice.""" self._initialize_physical_devices() if dev not in self._physical_devices: raise ValueError("Unrecognized device: %s" % repr(dev)) return self._memory_growth_map[dev] def set_memory_growth(self, dev, enable): """Set if memory growth should be enabled for a PhysicalDevice.""" self._initialize_physical_devices() if dev not in self._physical_devices: raise ValueError("Unrecognized device: %s" % repr(dev)) if
dev in self._virtual_device_map: raise ValueError( "Cannot set memory growth on device when virtual devices are configured") if dev.device_type != "GPU": raise ValueError("Cannot set memory growth on non-GPU devices") if self._memory_growth_map.get(dev) == enable: return if self._context_handle is not None: raise RuntimeError( "Physical devices cannot be modified after being initialized") self._memory_growth_map[dev] = enable def get_virtual_device_configuration(self, dev): """Get the virtual device configuration for a PhysicalDevice.""" self._initialize_physical_devices() if dev not in self._physical_devices: raise ValueError("Unrecognized device: %s" % repr(dev)) return self._virtual_device_map.get(dev) def set_virtual_device_configuration(self, dev, virtual_devices): """Set the virtual device configuration for a PhysicalDevice.""" self._initialize_physical_devices() if dev not in self._physical_devices: raise ValueError("Unrecognized device: %s" % repr(dev)) if dev.device_type == "CPU": for vdev in virtual_devices: if vdev.memory_limit is not None: raise ValueError("Setting memory limit on CPU virtual devices is " "currently not supported") elif dev.device_type == "GPU": for vdev in virtual_devices: if vdev.memory_limit is None: raise ValueError( "Setting a memory limit is required for GPU virtual devices") else: raise ValueError("Virtual devices are not supported for %s" % dev.device_type) if self._virtual_device_map.get(dev) == virtual_devices: return if self._context_handle is not None: raise RuntimeError( "Virtual devices cannot be modified after being initialized") self._virtual_device_map[dev] = virtual_devices @property def optimizer_jit(self): level = self.config.graph_options.optimizer_options.global_jit_level return (level == config_pb2.OptimizerOptions.ON_1 or level == config_pb2.OptimizerOptions.ON_2) @optimizer_jit.setter def optimizer_jit(self, enabled): self._optimizer_jit = enabled self._thread_local_data.function_call_options = None def get_optimizer_experimental_options(self): """Get experimental options for the optimizer. Returns: Dictionary of current option values. """ rewrite_options = self.config.graph_options.rewrite_options options = {} def rewriter_toggle(option): attr = getattr(rewrite_options, option) if attr != 0: options[option] = (attr == rewriter_config_pb2.RewriterConfig.ON) def rewriter_bool(option): options[option] = getattr(rewrite_options, option) rewriter_toggle("layout_optimizer") rewriter_toggle("constant_folding") rewriter_toggle("shape_optimization") rewriter_toggle("remapping") rewriter_toggle("arithmetic_optimization") rewriter_toggle("dependency_optimization") rewriter_toggle("loop_optimization") rewriter_toggle("function_optimization") rewriter_toggle("debug_stripper") rewriter_bool("disable_model_pruning") rewriter_toggle("scoped_allocator_optimization") rewriter_toggle("pin_to_host_optimization") rewriter_toggle("implementation_selector") rewriter_toggle("auto_mixed_precision") rewriter_bool("disable_meta_optimizer") if rewrite_options.min_graph_nodes != 0: options["min_graph_nodes"] = rewrite_options.min_graph_nodes return options def set_optimizer_experimental_options(self, options): """Set experimental options for the optimizer.
Args: options: Dictionary of options to modify """ self._optimizer_experimental_options.update(options) self._thread_local_data.function_call_options = None @property def intra_op_parallelism_threads(self): return self.config.intra_op_parallelism_threads @intra_op_parallelism_threads.setter def intra_op_parallelism_threads(self, num_threads): if self._intra_op_parallelism_threads == num_threads: return if self._context_handle is not None: raise RuntimeError( "Intra op parallelism cannot be modified after initialization.") self._intra_op_parallelism_threads = num_threads @property def inter_op_parallelism_threads(self): return self.config.inter_op_parallelism_threads @inter_op_parallelism_threads.setter def inter_op_parallelism_threads(self, num_threads): if self._inter_op_parallelism_threads == num_threads: return if self._context_handle is not None: raise RuntimeError( "Inter op parallelism cannot be modified after initialization.") self._inter_op_parallelism_threads = num_threads @property def soft_device_placement(self): return self.config.allow_soft_placement @soft_device_placement.setter def soft_device_placement(self, enabled): self._soft_device_placement = enabled self._thread_local_data.function_call_options = None @property def log_device_placement(self): return self.config.log_device_placement @log_device_placement.setter def log_device_placement(self, enabled): if self._log_device_placement == enabled: return if self._context_handle is not None: raise RuntimeError( "Device placement logging must be set at program startup") self._log_device_placement = enabled self._thread_local_data.function_call_options = None @property def device_policy(self): # Only get the policy from the context if it has already been initialized if self._context_handle is not None: return pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(self._handle) return self._device_policy @device_policy.setter def device_policy(self, policy): if policy is None: policy = DEVICE_PLACEMENT_SILENT if self._device_policy != policy: self._device_policy = policy # Only set the policy if the context has already been initialized if self._context_handle is not None: pywrap_tensorflow.TFE_ContextSetThreadLocalDevicePlacementPolicy( self._handle, self._device_policy) @property def mirroring_policy(self): # Only get the policy from the context if it has already been initialized if self._context_handle is not None: return pywrap_tensorflow.TFE_ContextGetMirroringPolicy(self._handle) return self._mirroring_policy @mirroring_policy.setter def mirroring_policy(self, policy): if policy is None: policy = MIRRORING_NONE if self._mirroring_policy != policy: self._mirroring_policy = policy # Only set the policy if the context has already been initialized if self._context_handle is not None: pywrap_tensorflow.TFE_ContextSetThreadLocalMirroringPolicy( self._handle, self._mirroring_policy) def enable_run_metadata(self): """Enables tracing of op execution via RunMetadata. To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata(). """ self.ensure_initialized() pywrap_tensorflow.TFE_ContextEnableRunMetadata(self._handle) def disable_run_metadata(self): """Disables tracing of op execution via RunMetadata.""" if not self._context_handle: return pywrap_tensorflow.TFE_ContextDisableRunMetadata(self._context_handle) def enable_graph_collection(self): """Enables graph collection of executed functions. 
To retrieve the accumulated graphs call context.export_run_metadata() and to stop collecting graphs call context.disable_graph_collection(). """ self.ensure_initialized() pywrap_tensorflow.TFE_ContextEnableGraphCollection(self._handle) def disable_graph_collection(self): """Disables graph collection of executed functions.""" if not self._context_handle: return pywrap_tensorflow.TFE_ContextDisableGraphCollection(self._context_handle) def export_run_metadata(self): """Returns a RunMetadata proto with accumulated information. The returned protocol buffer contains information since the most recent call to either enable_run_metadata or export_run_metadata. Returns: A RunMetadata protocol buffer. Or None if not enabled. """ if not self._context_handle: return None with c_api_util.tf_buffer() as buffer_: pywrap_tensorflow.TFE_ContextExportRunMetadata( self._context_handle, buffer_) proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_) run_metadata = config_pb2.RunMetadata() run_metadata.ParseFromString(compat.as_bytes(proto_data)) return run_metadata @property def context_switches(self): """Returns a stack of context switches.""" return self._context_switches def start_step(self): pywrap_tensorflow.TFE_ContextStartStep(self._handle) def end_step(self): pywrap_tensorflow.TFE_ContextEndStep(self._handle) class _EagerDeviceContext(object): """Context-manager forcing placement of ops and Tensors on a device.""" def __init__(self, ctx, device_name): self._device_name = device_name self._ctx = ctx self._stack = [] def __enter__(self): ctx = self._ctx old_device_name = ctx.device_name old_device_spec = ctx.device_spec new_device_name = self._device_name cache_key = (old_device_name, new_device_name) try: new_device_name, new_device_spec = _device_parsing_cache[cache_key] except TypeError: # Error while trying to compute the cache key. raise ValueError("Expecting a string device name. Got %s(%s)" % (type(new_device_name), new_device_name)) except KeyError: # Handle a cache miss. if new_device_name is not None: if not isinstance(new_device_name, six.string_types): raise ValueError("Expecting a string device name. Got %s(%s)" % (type(new_device_name), new_device_name)) device_spec = pydev.DeviceSpec.from_string(new_device_name) if old_device_name: new_device_spec = copy.copy(old_device_spec) else: ctx.ensure_initialized() new_device_spec = pydev.DeviceSpec.from_string( ctx._context_devices[0]) # pylint: disable=protected-access new_device_spec = new_device_spec.make_merged_spec(device_spec) else: new_device_spec = pydev.DeviceSpec.from_string("") new_device_name = new_device_spec.to_string() _device_parsing_cache[cache_key] = (new_device_name, new_device_spec) ctx._set_device(new_device_name, new_device_spec) # pylint: disable=protected-access self._stack.append((old_device_name, old_device_spec, new_device_spec)) def __exit__(self, *ex_info): ctx = self._ctx old_device_name, old_device_spec, new_device_spec = self._stack[-1] if ctx.device_spec is not new_device_spec: raise RuntimeError( "Exiting device scope without proper scope nesting") del self._stack[-1] ctx._set_device(old_device_name, old_device_spec) # pylint: disable=protected-access # Do not set directly. Use _set_context. 
_context = None _context_lock = threading.Lock() def _set_context_locked(ctx): global _context pywrap_tensorflow.TFE_Py_SetEagerContext(ctx) _context = ctx def _set_context(ctx): with _context_lock: _set_context_locked(ctx) def _create_context(): with _context_lock: if _context is None: ctx = Context() _set_context_locked(ctx) def context(): """Returns a singleton context object.""" if _context is None: _create_context() return _context def context_safe(): """Returns current context (or None if one hasn't been initialized).""" return _context def ensure_initialized(): """Initialize the context.""" context().ensure_initialized() def set_global_seed(seed): """Sets the eager mode seed.""" context()._set_global_seed(seed) # pylint: disable=protected-access def global_seed(): """Returns the eager mode seed.""" return context()._seed # pylint: disable=protected-access def internal_operation_seed(): """Returns the operation seed generated based on global seed.""" return context()._internal_operation_seed() # pylint: disable=protected-access @tf_export("executing_eagerly") def executing_eagerly(): """Returns True if the current thread has eager execution enabled. Eager execution is typically enabled via `tf.compat.v1.enable_eager_execution`, but may also be enabled within the context of a Python function via tf.contrib.eager.py_func. """ if context_safe() is None: return default_execution_mode == EAGER_MODE return context().executing_eagerly() def in_eager_mode(): """Use executing_eagerly() instead. This function will be removed.""" return executing_eagerly() def shared_name(name=None): """Returns the anonymous shared name GUID if no shared name is specified. In eager mode we need to use a unique shared name to avoid spurious sharing issues. The runtime generates a unique name on our behalf when the reserved GUID is used as a shared name. Args: name: Optional shared name Returns: Eager compatible shared name. """ if name or not executing_eagerly(): return name # Ensure a unique name when eager execution is enabled to avoid spurious # sharing issues. return "cd2c89b7-88b7-44c8-ad83-06c2a9158347" def graph_mode(): """Context-manager to disable eager execution for the current thread.""" return context()._mode(GRAPH_MODE) # pylint: disable=protected-access def eager_mode(): """Context-manager to enable eager execution for the current thread.""" return context()._mode(EAGER_MODE) # pylint: disable=protected-access # TODO(agarwal): get rid of this and use ops.name_scope instead. @contextlib.contextmanager def namescope(name): """ContextManager for creating hierarchical name scopes.""" ctx = context() old_name = ctx.scope_name ctx.scope_name = "%s/%s" % (old_name, name) if old_name else name try: yield finally: ctx.scope_name = old_name def scope_name(): """Name of the current scope.""" return context().scope_name def device(name): """Context-manager to force placement of operations and Tensors on a device. Example: ```python with tf.device('gpu:0'): with tf.device('cpu:0'): shape = tf.constant([], dtype=tf.int32) x = tf.random.truncated_normal(shape, tf.float32) ``` will ensure that the `shape` Tensor is on CPU but the `truncated_normal` operation runs on GPU 0. Args: name: Name of the device (see context().devices()), or None to perform automatic placement. Returns: Context manager for setting the device. """ ensure_initialized() return context().device(name) @tf_export("config.experimental_list_devices") def list_devices(): """List the names of the available devices. 
Returns: Names of the available devices, as a `list`. """ ensure_initialized() return context().devices() @tf_export("debugging.get_log_device_placement") def get_log_device_placement(): """Get if device placements are logged. Returns: Whether device placements are logged. """ return context().log_device_placement @tf_export("debugging.set_log_device_placement") def set_log_device_placement(enabled): """Set if device placements should be logged. Args: enabled: Whether to enable device placement logging. """ context().log_device_placement = enabled @tf_contextlib.contextmanager def device_policy(policy): """Context manager for setting device placement policy for current thread.""" ctx = context() old_policy = ctx.device_policy try: ctx.device_policy = policy yield finally: ctx.device_policy = old_policy @tf_contextlib.contextmanager def mirroring_policy(policy): """Context manager for setting mirroring policy for current thread.""" ctx = context() old_policy = ctx.mirroring_policy try: ctx.mirroring_policy = policy yield finally: ctx.mirroring_policy = old_policy def set_execution_mode(mode): """Sets execution mode for the current thread.""" context().execution_mode = mode # TODO(fishx): remove this method. @tf_contextlib.contextmanager def execution_mode(mode): """Context manager for setting execution mode for current thread.""" ctx = context() executor_new = executor.new_executor(mode == ASYNC) executor_old = ctx.executor try: executor_old.wait() ctx.executor = executor_new yield finally: ctx.executor = executor_old executor_new.wait() @tf_contextlib.contextmanager def executor_scope(e): """Context manager for changing executor for current thread. Args: e: An Executor to execute eager ops under this scope. Setting it to None will switch back to use the default executor for the context. Yields: Context manager for setting the executor for current thread. """ ctx = context() executor_old = ctx.executor try: ctx.executor = e yield finally: ctx.executor = executor_old @tf_export("experimental.function_executor_type") @tf_contextlib.contextmanager def function_executor_type(executor_type): """Context manager for setting the executor of eager defined functions. Eager defined functions are functions decorated by tf.contrib.eager.defun. Args: executor_type: a string for the name of the executor to be used to execute functions defined by tf.contrib.eager.defun. Yields: Context manager for setting the executor of eager defined functions. """ current_options = context().function_call_options old_options = copy.copy(current_options) try: current_options.executor_type = executor_type yield finally: context().function_call_options = old_options def is_async(): """Returns True if the current thread is in async mode.""" return context().is_async() def async_wait(): """Waits for ops dispatched in ASYNC mode to finish.""" return context().executor.wait() def async_clear_error(): """Clears errors raised during ASYNC execution mode.""" return context().executor.clear_error() def num_gpus(): """Get the number of available GPU devices. Returns: The number of available GPU devices. """ return context().num_gpus() def enable_run_metadata(): """Enables tracing of op execution via RunMetadata. To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata().
""" context().enable_run_metadata() def disable_run_metadata(): """Disables tracing of op execution via RunMetadata.""" context().disable_run_metadata() def enable_graph_collection(): """Enables graph collection of executed functions. To retrieve the accumulated graphs call context.export_run_metadata() and to stop collecting graphs call context.disable_graph_collection(). """ context().enable_graph_collection() def disable_graph_collection(): """Disables graph collection of executed functions.""" context().disable_graph_collection() def export_run_metadata(): """Returns a RunMetadata proto with accumulated information. The returned protocol buffer contains information since the most recent call to either enable_run_metadata or export_run_metadata. Returns: A RunMetadata protocol buffer. """ return context().export_run_metadata() def set_server_def(server_def): context().set_server_def(server_def) def add_function(fdef): """Add a function definition to the context.""" context().add_function(fdef) def remove_function(name): """Remove a function from the context.""" context().remove_function(name) # Not every user creates a Context via context.context() # (for example, enable_eager_execution in python/framework/ops.py), # but they do all import this file. Note that IS_IN_GRAPH_MODE and # in_graph_mode are both parameterless functions. def _tmp_in_graph_mode(): if context_safe() is None: # Context not yet initialized. Assume graph mode following the # default implementation in `is_in_graph_mode`. return True return not executing_eagerly() is_in_graph_mode.IS_IN_GRAPH_MODE = _tmp_in_graph_mode
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/context.py
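# --- Editor's illustrative sketch (not part of the source tree above): how
# the eager context module is typically driven through the public TF 1.15
# API. `tf.executing_eagerly` and `tf.device` ultimately consult the
# context() singleton defined in context.py.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()
assert tf.executing_eagerly()  # reflects the thread-local eager mode

with tf.device("/cpu:0"):  # forces placement for ops created in this scope
  x = tf.constant([[1.0, 2.0]])
  y = tf.matmul(x, x, transpose_b=True)
print(y.device)  # ends in ".../device:CPU:0"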
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic tests for gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops # Importing nn_grad for the registration functions. from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import nn_ops @custom_gradient.custom_gradient def two_outputs(a, b): mm = math_ops.matmul(a, b) r = math_ops.reduce_sum(mm) def grad(dmm, dr): return [ math_ops.matmul(dmm, b, transpose_b=True) + math_ops.matmul(array_ops.ones_like(b * dr), b, transpose_b=True), math_ops.matmul(a, dmm, transpose_b=True) + math_ops.matmul(a, array_ops.ones_like(a) * dr, transpose_b=True) ] return [mm, r], grad @custom_gradient.custom_gradient def gradient_is_constant(x): result = x * x def grad(dr): return [dr] return result, grad class TapeTest(test.TestCase): def testMultiOutput(self): def fn(x, y): c = x + y # Multiple outputs from split. 
d, f = array_ops.split(c, 2) return d + f a = constant_op.constant([[1., 0.], [0., 1.]]) b = constant_op.constant([[1., 2.], [3., 4.]]) da, db = backprop.gradients_function(fn, [0, 1])(a, b) with context.graph_mode(), self.cached_session(): tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32) tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32) tf_c = tf_a + tf_b tf_d, tf_f = array_ops.split(tf_c, 2, axis=1) tf_e = tf_d + tf_f tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b]) self.assertAllEqual(da, self.evaluate(tf_da)) self.assertAllEqual(db, self.evaluate(tf_db)) def testBasicFunctional(self): def forward(a, b): mm = math_ops.matmul(a, b) return math_ops.reduce_sum(mm) aa = constant_op.constant([[1., 0.], [0., 1.]]) bb = constant_op.constant([[1., 2.], [3., 4.]]) da, = backprop.gradients_function(forward, ['a'])(aa, bb) self.assertAllEqual(da, math_ops.matmul( array_ops.ones_like(aa), array_ops.transpose(bb)).numpy()) def testBasicFunctionalPositionalArg(self): def forward(a, b): mm = math_ops.matmul(a, b) return math_ops.reduce_sum(mm) aa = constant_op.constant([[1., 0.], [0., 1.]]) bb = constant_op.constant([[1., 2.], [3., 4.]]) da, = backprop.gradients_function(forward, [0])(aa, bb) self.assertAllEqual(da, math_ops.matmul( array_ops.ones_like(aa), array_ops.transpose(bb)).numpy()) def testBasicFunctionalWithValue(self): def forward(a, b): mm = math_ops.matmul(a, b) return math_ops.reduce_sum(mm) aa = constant_op.constant([[1., 0.], [0., 1.]]) bb = constant_op.constant([[1., 2.], [3., 4.]]) val, (da,) = backprop.val_and_grad_function(forward, ['a'])(aa, bb) self.assertAllEqual(da, math_ops.matmul( array_ops.ones_like(aa), array_ops.transpose(bb))) self.assertAllEqual(val, forward(aa, bb)) def testTwoOutputs(self): def fn(x, y): mm, r = two_outputs(x, y) return r + math_ops.reduce_sum(mm) a = constant_op.constant([[1., 0.], [0., 1.]]) b = constant_op.constant([[1., 2.], [3., 4.]]) da, db = backprop.gradients_function(fn, [0, 1])(a, b) with context.graph_mode(), self.cached_session(): tf_a = constant_op.constant([[1, 0], [0, 1]], dtype=dtypes.float32) tf_b = constant_op.constant([[1, 2], [3, 4]], dtype=dtypes.float32) tf_mm = math_ops.matmul(tf_a, tf_b) tf_rr = 2 * math_ops.reduce_sum(tf_mm) tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b]) self.assertAllEqual(da, self.evaluate(tf_da)) self.assertAllEqual(db, self.evaluate(tf_db)) def testGcTwoOutputs(self): def fn(x, y): return nn_ops.sparse_softmax_cross_entropy_with_logits(logits=x, labels=y)[0] labels = constant_op.constant([0]) logits = constant_op.constant([[0.0]]) grad, = backprop.gradients_function(fn, [0])(logits, labels) self.assertAllEqual(grad, [[0.0]]) def testTfTensor(self): def fn(x): return x t = constant_op.constant(1.0) g, = backprop.gradients_function(fn, [0])(t) self.assertAllEqual(g, 1.0) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/tape_test.py
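# --- Editor's illustrative sketch of the pattern tape_test.py exercises: a
# custom gradient differentiated with the eager tape machinery. The
# clip_grad function is hypothetical, not part of the test file; assumes a
# TF 1.15 build with eager execution enabled.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

@tf.custom_gradient
def clip_grad(x):
  def grad(dy):
    return tf.clip_by_value(dy, -1.0, 1.0)  # alters only the backward pass
  return tf.identity(x), grad

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = clip_grad(x * x)  # forward value is unchanged
print(tape.gradient(y, x))  # 2 * x = 6.0; the unit upstream grad clips to 1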
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for device placement.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import context from tensorflow.python.eager import remote from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import test_util class SoftDevicePlacementTest(test.TestCase): def setUp(self): context.context().soft_device_placement = True context.context().log_device_placement = True @test_util.run_gpu_only def testDefaultPlacement(self): a = constant_op.constant(1) b = constant_op.constant(2) c = a + b with ops.device('CPU'): d = a + b self.assertIn('GPU', c.device) self.assertIn('CPU', d.device) @test_util.run_gpu_only def testUnsupportedDevice(self): a = constant_op.constant(1) b = constant_op.constant(2) s = constant_op.constant(list('hello world')) with ops.device('GPU:0'): c = a + b t = s[a] self.assertIn('GPU:0', c.device) self.assertIn('CPU', t.device) @test_util.run_gpu_only def testUnknownDevice(self): a = constant_op.constant(1) b = constant_op.constant(2) with ops.device('GPU:42'): c = a + b self.assertIn('GPU:0', c.device) def testNoGpu(self): if test_util.is_gpu_available(): # CPU only test. return a = constant_op.constant(1) b = constant_op.constant(2) c = a + b with ops.device('GPU'): d = a + b self.assertIn('CPU', c.device) self.assertIn('CPU', d.device) @test_util.run_gpu_only def testNestedDeviceScope(self): a = constant_op.constant(1) b = constant_op.constant(2) with ops.device('CPU:0'): with ops.device('GPU:42'): c = a + b # We don't support nested device placement right now. self.assertIn('GPU:0', c.device) class ClusterPlacementTest(test.TestCase): def setUp(self): context.context().soft_device_placement = True context.context().log_device_placement = True workers, _ = test_util.create_local_cluster(2, 0) remote.connect_to_remote_host([workers[0].target, workers[1].target]) def testNotFullySpecifiedTask(self): a = constant_op.constant(1) b = constant_op.constant(2) with ops.device('/job:worker'): c = a + b self.assertIn('/job:worker/replica:0/task:0', c.device) def testRemoteUnknownDevice(self): a = constant_op.constant(1) b = constant_op.constant(2) # Right now we don't support soft device place on remote worker. with self.assertRaises(errors.InvalidArgumentError) as cm: with ops.device('/job:worker/replica:0/task:0/device:GPU:42'): c = a + b del c self.assertIn('unknown device', cm.exception.message) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/device_placement_test.py
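# --- Editor's illustrative sketch of the soft-placement behavior checked
# above: with soft placement enabled, requesting a nonexistent device falls
# back to an available one instead of raising. Assumes TF 1.15 in eager mode.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()
tf.config.set_soft_device_placement(True)

with tf.device("GPU:42"):  # a device index that likely does not exist
  c = tf.constant(1) + tf.constant(2)
print(c.device)  # an available device, e.g. CPU:0 (or GPU:0 if present)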
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for forward-mode automatic differentiation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import execute from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest # TODO(allenl): experimental_relax_shapes for gradients which rely on static # shape information may be underspecialized. We may want hand-written forward # implementations. @def_function.function(experimental_relax_shapes=True) def _forward_gradient(op_name, attr_tuple, inputs, outputs, tangents): """Computes a Jacobian-vector product for an op. Note that this function would be wasteful if executed eagerly. It runs the backward gradient function and throws away the result just to record its operations on a GradientTape. These unused ops are pruned away when this function is traced. Args: op_name: A string, the type of operation being executed. attr_tuple: Attributes of the operation. inputs: A flat list of input Tensors to the operation. outputs: A flat list of output Tensors from the operation. tangents: A flat list of Tensors, same shape as `inputs`. Returns: A flat list of tangents corresponding to `outputs`. 
""" float_inputs = [] float_indices = [] nontrivial_tangents = [] for input_index, tensor in enumerate(inputs): if tensor.dtype.is_floating: float_inputs.append(tensor) float_indices.append(input_index) nontrivial_tangents.append(tangents[input_index]) with backprop.GradientTape() as transpose_tape: with backprop.GradientTape() as backfunc_tape: backfunc_tape.watch(float_inputs) execute.record_gradient(op_name, inputs, attr_tuple, outputs, "forward_op_replay") forwardprop_aids = [] float_outputs = [] nontrivial_output_indices = [] for output_index, output in enumerate(outputs): if output.dtype.is_floating: forwardprop_aids.append( array_ops.ones_like(output, name="unused_forwardprop_aid")) float_outputs.append(output) nontrivial_output_indices.append(output_index) transpose_tape.watch(forwardprop_aids) grads = backfunc_tape.gradient( float_outputs, float_inputs, forwardprop_aids, unconnected_gradients=UnconnectedGradients.ZERO) nontrivial_output_tangents = transpose_tape.gradient( grads, forwardprop_aids, output_gradients=nontrivial_tangents) output_tangents = [None] * len(outputs) for index, tangent in zip(nontrivial_output_indices, nontrivial_output_tangents): output_tangents[index] = tangent return output_tangents pywrap_tensorflow.TFE_Py_RegisterForwardGradientFunction(_forward_gradient) class ForwardGradientAccumulator(object): """Computes Jacobian-vector products using forward-mode autodiff. Example: ``` with ForwardGradientAccumulator() as acc: x = tf.constant([[2.0, 3.0], [1.0, 4.0]]) acc.watch(x, tf.constant([[5., 6.], [7., 8.]])) y = tf.reduce_sum(tf.sin(x) * tf.tan(x), axis=1) jvp = acc.jvp(y) ``` Note that `ForwardGradientAccumulator`s are always applied in creation order, so inner accumulators may not see JVP computation from outer accumulators. Take higher-order gradients from outer accumulators: ``` primal = tf.constant(1.1) with ForwardGradientAccumulator() as outer_acc: outer_acc.watch(primal, tf.constant(1.)) with ForwardGradientAccumulator() as acc: acc.watch(primal, tf.constant(1.)) primal_out = primal ** tf.constant(3.5) inner_jvp = acc.jvp(primal_out) outer_jvp = outer_acc.jvp(inner_jvp) ``` Reversing the collection in the last two lines to instead retrieve `acc.jvp(outer_acc.jvp(primal_out))` will not work. """ def __init__(self): self._accumulator = None self._recording = False def __enter__(self): self._push_accumulator() return self def __exit__(self, typ, value, traceback): if self._recording: self._pop_accumulator() def _push_accumulator(self): if self._recording: raise ValueError("Accumulator is already recording.") if self._accumulator is None: self._accumulator = pywrap_tensorflow.TFE_Py_ForwardAccumulatorNew() else: # TODO(allenl): Allow reuse raise NotImplementedError("Accumulator reuse isn't implemented yet.") self._recording = True def _pop_accumulator(self): if not self._recording: raise ValueError("Tape is not recording.") pywrap_tensorflow.TFE_Py_ForwardAccumulatorSetRemove(self._accumulator) self._recording = False # TODO(allenl): Does this need to be public, or should the constructor instead # take all watched Tensors? Write a realistic usage example (e.g. Hessian-free # optimization) and decide. def watch(self, tensor, tangents): """Ensures that `tensor` is being traced by this tape. Mathematically, `tangents` is part of a vector right-multiplying the Jacobian matrix (a Jacobian-vector product) for the function computed while the tape is active. 
Since JVPs are computed in forward mode as the computation happens, this vector must be supplied before the computation takes place. Watching a single Tensor multiple times sums each `tangents`. An un-watched Tensor has zeros for its tangent vector. Args: tensor: A Tensor or list of Tensors. tangents: A Tensor or list of Tensors matching `tensor`. """ nest.assert_same_structure(tensor, tangents) for t, g in zip(nest.flatten(tensor), nest.flatten(tangents)): if not t.dtype.is_floating: logging.log_first_n( logging.WARN, "The dtype of the watched tensor must be " "floating (e.g. tf.float32), got %r", 5, t.dtype) if hasattr(t, "handle"): # TODO(allenl): Handle watching variables. raise NotImplementedError("Currently only Tensors may be watched.") g = ops.convert_to_tensor(g, dtype=t.dtype) pywrap_tensorflow.TFE_Py_ForwardAccumulatorWatch(self._accumulator, t, g) def jvp(self, target): """Fetches the Jacobian-vector product computed for `target`. Note that this function performs no computation, and simply looks up a JVP that was already computed (unlike backprop using a `tf.GradientTape`, where the computation happens on the call to `tape.gradient`). Args: target: A watched Tensor or structure of Tensors to fetch the JVPs for. Returns: Tensors with the same shapes and dtypes as `target`, or None if no JVP is available. """ if self._accumulator is None: raise ValueError("Called jvp() without first tracing anything.") return nest.map_structure( functools.partial(pywrap_tensorflow.TFE_Py_ForwardAccumulatorJVP, self._accumulator), target)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/forwardprop.py
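# --- Editor's usage sketch, adapted from the ForwardGradientAccumulator
# docstring above. The module is internal to this source tree, so the import
# path is not a public API.
import tensorflow as tf
from tensorflow.python.eager import forwardprop

tf.compat.v1.enable_eager_execution()

with forwardprop.ForwardGradientAccumulator() as acc:
  x = tf.constant([[2.0, 3.0], [1.0, 4.0]])
  acc.watch(x, tf.constant([[5.0, 6.0], [7.0, 8.0]]))  # tangents come first
  y = tf.reduce_sum(tf.sin(x) * tf.tan(x), axis=1)
print(acc.jvp(y))  # Jacobian-vector product along the watched tangents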
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Profiler client APIs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow from tensorflow.python.framework import c_api_util from tensorflow.python.framework import errors def start_tracing(service_addr, logdir, duration_ms, worker_list='', include_dataset_ops=True, num_tracing_attempts=3): """Sends grpc requests to profiler server to perform on-demand profiling. This method will block caller thread until receives tracing result. Args: service_addr: Address of profiler service e.g. localhost:6009. logdir: Path of TensorBoard log directory e.g. /tmp/tb_log. duration_ms: Duration of tracing or monitoring in ms. worker_list: The list of worker TPUs that we are about to profile in the current session. (TPU only) include_dataset_ops: Set to false to profile longer traces. num_tracing_attempts: Automatically retry N times when no trace event is collected. Raises: UnavailableError: If no trace event is collected. """ if not pywrap_tensorflow.TFE_ProfilerClientStartTracing( service_addr, logdir, worker_list, include_dataset_ops, duration_ms, num_tracing_attempts): raise errors.UnavailableError(None, None, 'No trace event is collected.') def monitor(service_addr, duration_ms, monitoring_level=1, display_timestamp=False): """Sends grpc requests to profiler server to perform on-demand monitoring. This method will block caller thread until receives monitoring result. Args: service_addr: Address of profiler service e.g. localhost:6009. duration_ms: Duration of tracing or monitoring in ms. monitoring_level: Choose a monitoring level between 1 and 2 to monitor your job. Level 2 is more verbose than level 1 and shows more metrics. display_timestamp: Set to true to display timestamp in monitoring result. Returns: A string of monitoring output. """ with c_api_util.tf_buffer() as buffer_: pywrap_tensorflow.TFE_ProfilerClientMonitor(service_addr, duration_ms, monitoring_level, display_timestamp, buffer_) return pywrap_tensorflow.TF_GetBuffer(buffer_)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/profiler_client.py
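# --- Editor's usage sketch for the profiler client above. The service
# address and log directory are the placeholder values from the docstrings,
# and a profiler server is assumed to be running there; start_tracing blocks
# and raises UnavailableError if no trace events are collected.
from tensorflow.python.eager import profiler_client

profiler_client.start_tracing(
    service_addr='localhost:6009',  # assumed profiler service address
    logdir='/tmp/tb_log',           # assumed TensorBoard log directory
    duration_ms=2000)

# Level-2 monitoring is more verbose than level 1.
print(profiler_client.monitor('localhost:6009', duration_ms=1000,
                              monitoring_level=2))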
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unidiomatic-typecheck """Utility to lift subgraphs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.framework import func_graph from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import op_selector from tensorflow.python.ops import resource_variable_ops from tensorflow.python.util import compat from tensorflow.python.util import object_identity UnliftableError = op_selector.UnliftableError def _as_operation(op_or_tensor): if isinstance(op_or_tensor, ops.Tensor): return op_or_tensor.op return op_or_tensor def _constant_inputs(op_or_tensor): return all(_as_operation(i).type == u"Const" and not _as_operation(i).control_inputs for i in op_selector.graph_inputs(_as_operation(op_or_tensor))) # Represents an input to `copied_op` which must be updated once # `old_graph_tensor` has been copied. _InputMutation = collections.namedtuple( "_InputMutation", ["copied_op", "input_index", "old_graph_tensor"]) # Represents a control input to `copied_op` which must be added once # `old_graph_op` has been copied. _ControlMutation = collections.namedtuple( "_ControlMutation", ["copied_op", "old_graph_op"]) def _copy_non_source(op, graph, op_map, base_graph): """Copy an op directly to a given graph. Generally `op`'s inputs should already have been copied. If this is not the case, for example with v1 while_loops, then `_copy_non_source` inserts placeholders for the unavailable Tensors and returns a list of required mutations. Args: op: The op to be copied. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one. base_graph: The graph we're copying from, for any necessary functions. Returns: A tuple of (required_inputs, required_control_inputs): required_inputs: A list of `_InputMutation` tuples containing inputs to `copied_op` which must be updated once `old_graph_tensor` has been copied. required_control_inputs: A list of `_ControlMutation` tuples containing control inputs to `copied_op` which must be added once `old_graph_op` has been copied. """ input_mutations = [] control_mutations = [] copied_inputs = [] for input_index, original_input in enumerate(op.inputs): copied_input = op_map.get(original_input, None) if copied_input is None: # An input for this op is missing due to a loop in the graph. We'll insert # a placeholder for now and return information about the required post-hoc # mutation. copied_input = array_ops.placeholder( name="unused_control_flow_input", shape=original_input.shape, dtype=original_input.dtype) input_mutations.append( # `copied_op` is filled in below, after we've created it. 
_InputMutation(copied_op=None, input_index=input_index, old_graph_tensor=original_input)) copied_inputs.append(copied_input) copied_control_inputs = [] for original_control_input in op.control_inputs: copied_control_input = op_map.get(original_control_input, None) if copied_control_input is None: control_mutations.append( _ControlMutation(copied_op=None, old_graph_op=original_control_input)) else: copied_control_inputs.append(copied_control_input) # Don't copy over nodes with _tpu_replicate attribute. This attribute is used # to signal that the op was built inside a tpu_replicate context; if we're # lifting it to another graph we're similarly lifting it into another context. with ops.control_dependencies(copied_control_inputs), ops.device(op.device): # pylint: disable=protected-access f = base_graph._functions.get(op.type, None) if f is not None and compat.as_str(f.name) not in graph._functions: f.add_to_graph(graph) # pylint: enable=protected-access # Create a new op in the destination graph if it doesn't already exist. copied_op = graph.create_op( op_type=op.type, inputs=copied_inputs, dtypes=[x.dtype for x in op.outputs], attrs={ key: value for key, value in op.node_def.attr.items() if not key.startswith("_class") and not key.startswith("_tpu_replicate") }, # b/128981532. name=op.name) op_map[op] = copied_op for i, o in enumerate(op.outputs): op_map[o] = copied_op.outputs[i] return ([mutation._replace(copied_op=copied_op) for mutation in input_mutations], [mutation._replace(copied_op=copied_op) for mutation in control_mutations]) def _copy_source(s, graph, op_map, handle_captures, inverse_captures, base_graph): """Create a source in a graph based on a Tensor from a different graph. This function creates a placeholder analog of `s` in a graph with the following behavior: 1) If s is a captured Tensor or Variable and handle_captures is set to True, simply capture it in the new graph as well. 2) If s is a PlaceholderWithDefault whose default is a constant, preserve said default in the new graph. 3) When applicable, copy resource variable metadata from `s` to the newly created placeholder. Args: s: The source of interest. graph: The destination graph. op_map: A dict mapping ops and tensors in the old graph to the new one. handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. inverse_captures: A dict mapping s back to the Tensor or Variable that it captures. base_graph: The graph being copied from. """ if handle_captures and s in inverse_captures: copied_placeholder = graph.capture(inverse_captures[s], name=s.op.name) elif s.op.type == "PlaceholderWithDefault" and _constant_inputs(s): # Copy the default value to the graph. default_value = s.op.inputs[0] unavailable_inputs, unavailable_control_inputs = _copy_non_source( op=default_value.op, graph=graph, op_map=op_map, base_graph=base_graph) if unavailable_inputs or unavailable_control_inputs: raise AssertionError( "Could not copy source node {} because it has inputs."
.format(default_value)) with ops.device(s.op.device): copied_placeholder = array_ops.placeholder_with_default( input=op_map[default_value], shape=s.shape, name=s.op.name) else: with ops.device(s.op.device): copied_placeholder = array_ops.placeholder( dtype=s.dtype, shape=s.shape, name=s.op.name) base_handle = resource_variable_ops.get_resource_handle_data(s) if base_handle.shape_and_type: resource_variable_ops._set_handle_shapes_and_types( # pylint: disable=protected-access copied_placeholder, base_handle, graph_mode=True) op_map[s] = copied_placeholder # Add an entry for the op of the source tensor so that if there are any nodes # depending on that op via control dependencies it can work correctly. op_map[s.op] = copied_placeholder.op def lift_to_graph(tensors, graph, sources=None, disallowed_placeholders=None, add_sources=False, handle_captures=False, base_graph=None, op_map=None): """Copies the tensor and all its inputs recursively to the outer graph. Args: tensors: The Tensors to lift. graph: The graph to lift to. sources: Optional sequence of nodes to start from. If omitted the whole subgraph which feeds into `init_tensor` is lifted. disallowed_placeholders: An optional set of ops which may not appear in the lifted graph. Defaults to all placeholders. add_sources: A boolean indicating whether placeholders which are not in sources should be allowed. handle_captures: A boolean indicating whether to re-capture s in the new graph or simply create a vanilla placeholder. base_graph: The graph from which to lift ops. This will be inferred if not specified. op_map: A map contains all the existing nodes that have been lifted to the destination graph, so they won't be lifted and copied again. Returns: A mapping from ops in the current default graph to ops in `graph`. Raises: UnliftableError: If a placeholder blocks lifting. """ variable_init_tensors = [] init_tensors = [] for tensor in tensors: if isinstance(tensor, resource_variable_ops.ResourceVariable): variable_init_tensors.append(tensor) else: init_tensors.append(tensor) base_graph = base_graph or init_tensors[0].graph op_map = op_map or object_identity.ObjectIdentityDictionary() # Check that the initializer does not depend on any placeholders. sources = object_identity.ObjectIdentitySet(sources or []) visited_ops = set([x.op for x in sources]) op_outputs = collections.defaultdict(set) # First we extract the subgraph between init_tensors and sources. for init_tensor in init_tensors: sources.update(op_selector.map_subgraph( init_tensor=init_tensor, sources=sources, disallowed_placeholders=disallowed_placeholders, visited_ops=visited_ops, op_outputs=op_outputs, add_sources=add_sources)) # Try to topologically sort the nodes we've extracted. Now we know how many of # their outputs are part of this subgraph. ops_to_copy = [] marked_ops = set([]) ops_to_visit = [_as_operation(t) for t in init_tensors if not op_outputs[_as_operation(t)]] unvisited_ops = set(ops_to_visit) while unvisited_ops: while ops_to_visit: op = ops_to_visit.pop() if op in marked_ops: continue marked_ops.add(op) ops_to_copy.append(op) for inp in op_selector.graph_inputs(op): # Don't lift the TPUReplicateMetadata nodes out of the function, because # it has no registered kernels. if inp.name == "TPUReplicateMetadata": continue unvisited_ops.add(inp) if (all(x in marked_ops for x in op_outputs[inp]) and inp not in sources): ops_to_visit.append(inp) unvisited_ops.difference_update(marked_ops) if unvisited_ops: # `unvisited_ops` should only have elements if the graph has a loop. 
In # this case we want to keep copying and there's no topological ordering; # we'll do ugly post-hoc mutations instead. ops_to_visit.append(next(iter(unvisited_ops))) # When lifting from one FuncGraph to another, we will need to capture the # relevant tensors as well. captures = [] inverse_captures = object_identity.ObjectIdentityDictionary() internal_captures = [] if (isinstance(base_graph, func_graph.FuncGraph) and isinstance(graph, func_graph.FuncGraph)): captures = base_graph.captures for external_capture, internal_capture in captures: inverse_captures[internal_capture] = external_capture internal_captures = base_graph.internal_captures # ops_to_copy now holds a reverse topologically sorted list of ops which # ends in the initializer. We copy those to the outermost graph and # build the initialization op there. with graph.as_default(): for i in variable_init_tensors: op_map[i] = i source_ops = set() # Add the sources in the same order as the original graph. for s in internal_captures: if s in sources: sources.remove(s) source_ops.add(s.op) _copy_source( s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph) for s in sources: source_ops.add(s.op) _copy_source( s=s, graph=graph, op_map=op_map, handle_captures=handle_captures, inverse_captures=inverse_captures, base_graph=base_graph) input_mutations = [] control_mutations = [] for op in reversed(ops_to_copy): if op in source_ops or op in op_map: continue new_input_mutations, new_control_mutations = _copy_non_source( op=op, graph=graph, op_map=op_map, base_graph=base_graph) input_mutations.extend(new_input_mutations) control_mutations.extend(new_control_mutations) # Mutate the new graph to insert any loops which existed in the source # graph due to v1 while_loops. # # pylint: disable=protected-access with graph._mutation_lock(): for mutation in input_mutations: mutation.copied_op._update_input( mutation.input_index, op_map[mutation.old_graph_tensor]) for mutation in control_mutations: # Don't lift the TPUReplicateMetadata nodes out of the function, because # it has no registered kernels. if mutation.old_graph_op.name == "TPUReplicateMetadata": continue mutation.copied_op._add_control_input(op_map[mutation.old_graph_op]) # pylint: enable=protected-access return op_map
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/lift_to_graph.py
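# --- Editor's minimal sketch of what lift_to_graph does: copy the subgraph
# feeding a tensor out of a FuncGraph into another graph. Internal imports
# reflect this source tree; treat this as an assumption-laden illustration,
# not the canonical call site (wrap_function is the main in-tree user).
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

source = func_graph.FuncGraph('source')
with source.as_default():
  x = constant_op.constant(2.0)
  y = math_ops.square(x)

target = ops.Graph()
# Returns a mapping from ops/tensors in `source` to their copies in `target`.
op_map = lift_to_graph.lift_to_graph([y], target)
print(op_map[y])  # the lifted copy of `y`, now living in `target`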
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for operations in eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import threading import weakref import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager import execute from tensorflow.python.eager import test from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.layers import core from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import sparse_ops class OpsTest(test_util.TensorFlowTestCase): def testExecuteBasic(self): three = constant_op.constant(3) five = constant_op.constant(5) product = three * five self.assertAllEqual(15, product) @test_util.run_gpu_only def testMatMulGPU(self): three = constant_op.constant([[3.]]).gpu() five = constant_op.constant([[5.]]).gpu() product = math_ops.matmul(three, five) self.assertEqual([[15.0]], product.numpy()) def testExecuteStringAttr(self): three = constant_op.constant(3.0) checked_three = array_ops.check_numerics(three, message='just checking') self.assertEqual([[3]], checked_three.numpy()) def testExecuteFloatAttr(self): three = constant_op.constant(3.0) almost_three = constant_op.constant(2.8) almost_equal = math_ops.approximate_equal( three, almost_three, tolerance=0.3) self.assertTrue(almost_equal) def testExecuteIntAttr(self): three = constant_op.constant(3) four = constant_op.constant(4) total = math_ops.add_n([three, four]) self.assertAllEqual(7, total) def testExecuteBoolAttr(self): three = constant_op.constant([[3]]) five = constant_op.constant([[5]]) product = math_ops.matmul(three, five, transpose_a=True) self.assertAllEqual([[15]], product) def testExecuteOneListOutput(self): split_dim = constant_op.constant(1) value = constant_op.constant([[0, 1, 2], [3, 4, 5]]) x1, x2, x3 = array_ops.split(value, 3, axis=split_dim) self.assertAllEqual([[0], [3]], x1) self.assertAllEqual([[1], [4]], x2) self.assertAllEqual([[2], [5]], x3) def testGraphMode(self): graph = ops.Graph() with graph.as_default(), context.graph_mode(): array_ops.placeholder(dtypes.int32) self.assertEqual(1, len(graph.get_operations())) # See comments on handling of int32 tensors on GPU in # EagerTensor.__init__. 
@test_util.run_gpu_only def testInt32CPUDefault(self): with context.device('/gpu:0'): r = constant_op.constant(1) + constant_op.constant(2) self.assertAllEqual(r, 3) def testExecuteListOutputLen1(self): split_dim = constant_op.constant(1) value = constant_op.constant([[0, 1, 2], [3, 4, 5]]) result = array_ops.split(value, 1, axis=split_dim) self.assertTrue(isinstance(result, list)) self.assertEqual(1, len(result)) self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0]) def testExecuteListOutputLen0(self): empty = constant_op.constant([], dtype=dtypes.int32) result = array_ops.unstack(empty, 0) self.assertTrue(isinstance(result, list)) self.assertEqual(0, len(result)) def testExecuteMultipleNonListOutput(self): x = constant_op.constant([1, 2, 3, 4, 5, 6]) y = constant_op.constant([1, 3, 5]) result = array_ops.listdiff(x, y) out, idx = result self.assertTrue(out is result.out) self.assertTrue(idx is result.idx) self.assertAllEqual([2, 4, 6], out) self.assertAllEqual([1, 3, 5], idx) def testExecuteMultipleListOutput(self): split_dim = constant_op.constant(1, dtype=dtypes.int64) indices = constant_op.constant([[0, 2], [0, 4], [0, 5], [1, 0], [1, 1]], dtype=dtypes.int64) values = constant_op.constant([2, 3, 5, 7, 11]) shape = constant_op.constant([2, 7], dtype=dtypes.int64) result = sparse_ops.gen_sparse_ops.sparse_split( split_dim, indices, values, shape, num_split=2) output_indices, output_values, output_shape = result self.assertEqual(2, len(output_indices)) self.assertEqual(2, len(output_values)) self.assertEqual(2, len(output_shape)) self.assertEqual(output_indices, result.output_indices) self.assertEqual(output_values, result.output_values) self.assertEqual(output_shape, result.output_shape) self.assertAllEqual([[0, 2], [1, 0], [1, 1]], output_indices[0]) self.assertAllEqual([[0, 0], [0, 1]], output_indices[1]) self.assertAllEqual([2, 7, 11], output_values[0]) self.assertAllEqual([3, 5], output_values[1]) self.assertAllEqual([2, 4], output_shape[0]) self.assertAllEqual([2, 3], output_shape[1]) # TODO(josh11b): Test an op that has multiple outputs, some but not # all of which are lists. Examples: barrier_take_many (currently # unsupported since it uses a type list) or sdca_optimizer (I don't # have an example of legal inputs & outputs). def testComposition(self): x = constant_op.constant(1, dtype=dtypes.int32) three_x = x + x + x self.assertEquals(dtypes.int32, three_x.dtype) self.assertAllEqual(3, three_x) def testOperatorOverrides(self): def ops_test(v1, v2): a = constant_op.constant(v1) b = constant_op.constant(v2) self.assertAllEqual((-a), np.negative(v1)) self.assertAllEqual(abs(b), np.absolute(v2)) self.assertAllEqual((a + b), np.add(v1, v2)) self.assertAllEqual((a - b), np.subtract(v1, v2)) self.assertAllEqual((a * b), np.multiply(v1, v2)) self.assertAllEqual((a * a), np.multiply(v1, v1)) if all(x >= 0 for x in v2): self.assertAllEqual((a**b), np.power(v1, v2)) self.assertAllEqual((a / b), np.true_divide(v1, v2)) self.assertAllEqual((a / a), np.true_divide(v1, v1)) self.assertAllEqual((a % b), np.mod(v1, v2)) self.assertAllEqual((a < b), np.less(v1, v2)) self.assertAllEqual((a <= b), np.less_equal(v1, v2)) self.assertAllEqual((a > b), np.greater(v1, v2)) self.assertAllEqual((a >= b), np.greater_equal(v1, v2)) # TODO(b/120678848): Remove the else branch once we enable # ops.Tensor._USE_EQUALITY by default. 
if ops.Tensor._USE_EQUALITY: self.assertAllEqual((a == b), np.equal(v1, v2)) self.assertAllEqual((a != b), np.not_equal(v1, v2)) else: self.assertAllEqual((a == b), np.equal(v1, v2)[0]) self.assertAllEqual((a != b), np.not_equal(v1, v2)[0]) self.assertAllEqual(v1[0], a[constant_op.constant(0)]) ops_test([1, 4, 8], [2, 3, 5]) ops_test([1, -4, -5], [-2, 3, -6]) def test_basic_slice(self): npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3) t = constant_op.constant(npt) self.assertAllEqual(npt[:, :, :], t[:, :, :]) self.assertAllEqual(npt[::, ::, ::], t[::, ::, ::]) self.assertAllEqual(npt[::1, ::1, ::1], t[::1, ::1, ::1]) self.assertAllEqual(npt[::1, ::5, ::2], t[::1, ::5, ::2]) self.assertAllEqual(npt[::-1, :, :], t[::-1, :, :]) self.assertAllEqual(npt[:, ::-1, :], t[:, ::-1, :]) self.assertAllEqual(npt[:, :, ::-1], t[:, :, ::-1]) self.assertAllEqual(npt[-2::-1, :, ::1], t[-2::-1, :, ::1]) self.assertAllEqual(npt[-2::-1, :, ::2], t[-2::-1, :, ::2]) def testDegenerateSlices(self): npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3) t = constant_op.constant(npt) # degenerate by offering a forward interval with a negative stride self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :]) # degenerate with a reverse interval with a positive stride self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :]) # empty interval in every dimension self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1]) def testEllipsis(self): npt = np.array( [[[[[1, 2], [3, 4], [5, 6]]], [[[7, 8], [9, 10], [11, 12]]]]]) t = constant_op.constant(npt) self.assertAllEqual(npt[0:], t[0:]) # implicit ellipsis self.assertAllEqual(npt[0:, ...], t[0:, ...]) # ellipsis alone self.assertAllEqual(npt[...], t[...]) # ellipsis at end self.assertAllEqual(npt[0:1, ...], t[0:1, ...]) # ellipsis at begin self.assertAllEqual(npt[..., 0:1], t[..., 0:1]) # ellipsis at middle self.assertAllEqual(npt[0:1, ..., 0:1], t[0:1, ..., 0:1]) def testShrink(self): npt = np.array([[[[[1, 2, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]], [[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]]]]) t = constant_op.constant(npt) self.assertAllEqual(npt[:, :, :, :, 3], t[:, :, :, :, 3]) self.assertAllEqual(npt[..., 3], t[..., 3]) self.assertAllEqual(npt[:, 0], t[:, 0]) self.assertAllEqual(npt[:, :, 0], t[:, :, 0]) @test_util.run_gpu_only def testOpWithInputsOnDifferentDevices(self): # The GPU kernel for the Reshape op requires that the # shape input be on CPU. value = constant_op.constant([1., 2.]).gpu() shape = constant_op.constant([2, 1]) reshaped = array_ops.reshape(value, shape) self.assertAllEqual([[1], [2]], reshaped.cpu()) def testInt64(self): # Fill requires the first input to be an int32 tensor. self.assertAllEqual( [1.0, 1.0], array_ops.fill(constant_op.constant([2], dtype=dtypes.int64), constant_op.constant(1))) @test_util.run_gpu_only def testOutputOnHostMemory(self): # The Shape op kernel on GPU places the output in host memory. 
value = constant_op.constant([1.]).gpu() shape = array_ops.shape(value) self.assertEqual([1], shape.numpy()) @test_util.run_gpu_only def testSilentCopy(self): # Temporarily replace the context # pylint: disable=protected-access old_context = context.context() context._set_context(context.Context()) try: config.set_device_policy('silent') cpu_tensor = constant_op.constant(1.0) gpu_tensor = cpu_tensor.gpu() self.assertAllEqual(cpu_tensor + gpu_tensor, 2.0) finally: context._set_context(old_context) # pylint: enable=protected-access @test_util.run_gpu_only def testSoftPlacement(self): # Temporarily replace the context # pylint: disable=protected-access old_context = context.context() context._set_context(context.Context()) try: config.set_device_policy('silent') config.set_soft_device_placement(True) cpu_tensor = constant_op.constant(1.0) result = cpu_tensor + cpu_tensor self.assertEqual(result.device, '/job:localhost/replica:0/task:0/device:GPU:0') finally: context._set_context(old_context) # pylint: enable=protected-access def testRandomUniform(self): scalar_shape = constant_op.constant([], dtype=dtypes.int32) x = random_ops.random_uniform(scalar_shape) self.assertEquals(0, x.shape.ndims) self.assertEquals(dtypes.float32, x.dtype) x = random_ops.random_uniform( scalar_shape, minval=constant_op.constant(5.), maxval=constant_op.constant(6.)) self.assertLess(x, 6) self.assertGreaterEqual(x, 5) def testArgsToMatchingEagerDefault(self): # Uses default ctx = context.context() t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int32) self.assertEquals(t, dtypes.int32) self.assertEquals(r[0].dtype, dtypes.int32) t, r = execute.args_to_matching_eager([[3, 4]], ctx, dtypes.int64) self.assertEquals(t, dtypes.int64) self.assertEquals(r[0].dtype, dtypes.int64) # Doesn't use default t, r = execute.args_to_matching_eager( [['string', 'arg']], ctx, dtypes.int32) self.assertEquals(t, dtypes.string) self.assertEquals(r[0].dtype, dtypes.string) def testFlattenLayer(self): flatten_layer = core.Flatten() x = constant_op.constant([[[-10, -20], [-30, -40]], [[10, 20], [30, 40]]]) y = flatten_layer(x) self.assertAllEqual([[-10, -20, -30, -40], [10, 20, 30, 40]], y) def testIdentity(self): self.assertAllEqual(2, array_ops.identity(2)) @test_util.run_gpu_only def testIdentityOnVariable(self): with context.device('/gpu:0'): v = resource_variable_ops.ResourceVariable(True) self.assertAllEqual(True, array_ops.identity(v)) def testIncompatibleSetShape(self): x = constant_op.constant(1) with self.assertRaises(ValueError): x.set_shape((1, 2)) def testCompatibleSetShape(self): x = constant_op.constant([[1, 2]]) x.set_shape(tensor_shape.TensorShape([None, 2])) self.assertEqual(x.get_shape(), (1, 2)) def testCastScalarToPrimitiveTypes(self): x = constant_op.constant(1.3) self.assertIsInstance(int(x), int) self.assertEqual(int(x), 1) self.assertIsInstance(float(x), float) self.assertAllClose(float(x), 1.3) def testCastNonScalarToPrimitiveTypesFails(self): x = constant_op.constant([1.3, 2]) with self.assertRaises(TypeError): int(x) with self.assertRaises(TypeError): float(x) def testRange(self): x = constant_op.constant(2) self.assertEqual([0, 1], list(range(x))) def testFormatString(self): x = constant_op.constant(3.1415) self.assertEqual('3.14', '{:.2f}'.format(x)) def testNoOpIsNone(self): self.assertTrue(control_flow_ops.no_op() is None) def testEagerContextPreservedAcrossThreads(self): def init_fn(): self.assertTrue(context.executing_eagerly()) with ops.init_scope(): self.assertTrue(context.executing_eagerly()) 
context_switches = context.context().context_switches self.assertEqual(len(context_switches.stack), 1) self.assertFalse(context_switches.stack[0].is_building_function) self.assertEqual(context_switches.stack[0].enter_context_fn, context.eager_mode) self.assertTrue(context.executing_eagerly()) t1 = threading.Thread(target=init_fn) t1.start() t1.join() def testWeakrefEagerTensor(self): x = constant_op.constant([[1.]]) x.at1 = constant_op.constant([[2.]]) x.at2 = 3. weak_x = weakref.ref(x) weak_xat1 = weakref.ref(x.at1) del x self.assertIs(weak_x(), None) self.assertIs(weak_xat1(), None) def testWeakKeyDictionaryTensor(self): weak_key_dict = weakref.WeakKeyDictionary() strong_x = constant_op.constant([[1.]]) strong_y = constant_op.constant([[2.]]) strong_x_ref = strong_x.experimental_ref() strong_y_ref = strong_y.experimental_ref() weak_key_dict[strong_x_ref] = constant_op.constant([[3.]]) weak_key_dict[strong_y_ref] = constant_op.constant([[4.]]) strong_y.a = constant_op.constant([[5.]]) weak_x_ref = weakref.ref(strong_x) del strong_x, strong_x_ref self.assertIs(weak_x_ref(), None) self.assertEqual([strong_y_ref], list(weak_key_dict)) self.assertEqual(1, len(list(weak_key_dict))) self.assertEqual(1, len(weak_key_dict)) del strong_y, strong_y_ref self.assertEqual([], list(weak_key_dict)) def testEagerTensorsCanBeGarbageCollected(self): x = constant_op.constant([[1.]]) y = constant_op.constant([[2.]]) x.y = y y.x = x weak_x = weakref.ref(x) weak_y = weakref.ref(y) del x del y gc.collect() self.assertIs(weak_x(), None) self.assertIs(weak_y(), None) if __name__ == '__main__': test.main()
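# An illustrative sketch of the eager contract the tests above rely on:
# ops execute immediately and their results can be compared against NumPy
# without a Session. The helper name is hypothetical and nothing calls it.
def _example_eager_op_matches_numpy():
  a = constant_op.constant([1.0, 2.0])
  b = constant_op.constant([3.0, 4.0])
  # Python operators on EagerTensors dispatch to TF kernels; .numpy()
  # reads back the concrete result right away.
  total = a + b
  return np.allclose(total.numpy(), np.add([1.0, 2.0], [3.0, 4.0]))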
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/ops_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import weakref

import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test


class ContextTest(test.TestCase):

  def testSetGlobalSeed(self):
    c = context.Context()
    c._set_global_seed(123)
    for t in [np.int32, np.int64, np.uint32, np.uint64]:
      c._set_global_seed(t(123))
      c._set_global_seed(np.array(123, dtype=t))
      c._set_global_seed(ops.convert_to_tensor(123, dtype=t))

  def testContextIsDestroyedAfterTensors(self):
    # Create a new context.
    new_context = context.Context()
    weak_c = weakref.ref(new_context)
    new_context.ensure_initialized()

    # Create a tensor with the new context as default, making sure to
    # restore the original context afterwards.
    original_context = context.context()
    try:
      context._set_context(new_context)
      # Use a 2D tensor so that it is not cached.
      tensor1 = constant_op.constant([[3.]])
      # Produce a tensor as an operation output. This uses a different code
      # path from tensors created from Python.
      tensor2 = tensor1 * tensor1
    finally:
      context._set_context(original_context)

    # Deleting our context reference should not delete the underlying object.
    del new_context
    self.assertIsNot(weak_c(), None)

    # Deleting the first tensor should not delete the context since there is
    # another tensor.
    del tensor1
    self.assertIsNot(weak_c(), None)

    # Deleting the last tensor should result in deleting its context.
    del tensor2
    self.assertIs(weak_c(), None)


if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
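# An illustrative sketch of the lifetime contract tested above: a Context
# stays alive for as long as any tensor created under it exists. The
# helper is hypothetical and is never invoked by the test runner.
def _example_tensor_keeps_context_alive():
  c = context.Context()
  weak_c = weakref.ref(c)
  c.ensure_initialized()
  original = context.context()
  try:
    context._set_context(c)  # pylint: disable=protected-access
    t = constant_op.constant([[3.]])  # 2D, so the tensor is not cached.
  finally:
    context._set_context(original)  # pylint: disable=protected-access
  del c  # The tensor still keeps the underlying context alive.
  alive_with_tensor = weak_c() is not None
  del t  # Dropping the last tensor releases the context.
  return alive_with_tensor and weak_c() is None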
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/context_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_only_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.eager import graph_only_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class GraphOnlyOpsTest(test_util.TensorFlowTestCase):

  @test_util.run_deprecated_v1
  def testGraphZerosLike(self):
    x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    z_tf = graph_only_ops.graph_zeros_like(x)
    with self.cached_session():
      self.assertAllClose(np.zeros((2, 3)), self.evaluate(z_tf))

  @test_util.run_deprecated_v1
  def testGraphPlaceholder(self):
    x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
    y_tf = math_ops.square(x_tf)
    with self.cached_session() as sess:
      x = np.array([42])
      y = sess.run(y_tf, feed_dict={x_tf: x})
      self.assertAllClose(np.square(x), y)


if __name__ == '__main__':
  test.main()
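# An illustrative sketch of the API exercised above: graph_placeholder
# creates a TF1-style placeholder without consulting eager state, so it
# must be built and run against the same graph. The helper name and the
# caller-supplied `sess` are hypothetical.
def _example_square_via_graph_placeholder(sess):
  """Squares a fed value; call with `sess.graph` as the default graph."""
  x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))
  y_tf = math_ops.square(x_tf)
  # Feed a concrete value at run time; with [7] this returns [49].
  return sess.run(y_tf, feed_dict={x_tf: np.array([7])})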
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/graph_only_ops_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Benchmarks for low-level eager execution primitives. To run CPU benchmarks: bazel run -c opt benchmarks_test -- --benchmarks=. To run GPU benchmarks: bazel run --config=cuda -c opt --copt="-mavx" benchmarks_test -- \ --benchmarks=. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import os import time import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python import keras from tensorflow.python import pywrap_tensorflow from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop # pylint: disable=unused-import from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import def_function from tensorflow.python.eager import forwardprop from tensorflow.python.eager import function from tensorflow.python.eager import profiler from tensorflow.python.eager import remote from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.training import gradient_descent from tensorflow.python.training import server_lib CPU = "/device:CPU:0" GPU = "/device:GPU:0" def c_tfe_py_fastpath_execute(a, b, transpose_a=False, transpose_b=False, name=None): ctx = context.context() assert ctx.executing_eagerly( ), "The prototype doesn't contain C code for graph construction" try: return pywrap_tensorflow.TFE_Py_FastPathExecute( ctx._handle, ctx.device_name, "MatMul", name, ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a, "transpose_b", transpose_b) except core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message six.raise_from(core._status_to_exception(e.code, message), None) class SubclassedKerasModel(keras.Model): def __init__(self, initializer="ones"): super(SubclassedKerasModel, self).__init__() self.layer_a = keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros") self.layer_b = keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros") self.layer_c = keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros") self.layer_d = keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros") self.layer_e = keras.layers.Dense( 10, 
kernel_initializer=initializer, bias_initializer="zeros") def call(self, x): x = self.layer_a(x) x = self.layer_b(x) x = self.layer_c(x) x = self.layer_d(x) return self.layer_e(x) def make_keras_model(initializer="ones"): model_input = keras.Input(shape=(10,)) x = keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros")(model_input) x = keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros")(x) x = keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")(x) x = keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")(x) x = keras.layers.Dense( 10, kernel_initializer=initializer, bias_initializer="zeros")(x) return keras.Model(inputs=model_input, outputs=x) def make_sequential_keras_model(initializer="ones"): model = keras.models.Sequential() model.add(keras.layers.Dense( 64, kernel_initializer=initializer, bias_initializer="zeros", input_shape=(10,))) model.add(keras.layers.Dense( 128, kernel_initializer=initializer, bias_initializer="zeros")) model.add(keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")) model.add(keras.layers.Dense( 256, kernel_initializer=initializer, bias_initializer="zeros")) model.add(keras.layers.Dense( 10, kernel_initializer=initializer, bias_initializer="zeros")) return model def run_benchmark(func, num_iters, execution_mode=None): ctx = context.context() with context.execution_mode(execution_mode): # call func to maybe warm up the GPU func() if execution_mode == context.ASYNC: ctx.executor.wait() start = time.time() for _ in xrange(num_iters): func() if execution_mode == context.ASYNC: ctx.executor.wait() end = time.time() return end - start class MicroBenchmarks(test.Benchmark): def __init__(self): # used for multiply benchmarks self._m_2 = random_ops.random_uniform([2]) # used for matmul benchmarks self._m_2_by_2 = random_ops.random_uniform((2, 2)) self._m_100_by_784 = random_ops.random_uniform((100, 784)) self._num_iters_2_by_2 = 30000 self._num_iters_100_by_784 = 30000 def _run(self, func, num_iters, execution_mode=None): total_time = run_benchmark(func, num_iters, execution_mode) mean_us = total_time * 1e6 / num_iters self.report_benchmark( iters=num_iters, wall_time=mean_us, extras={"examples_per_sec": num_iters / total_time}) def benchmark_create_np_array(self): func = lambda: np.array([3.0]) self._run(func, 30000) def _benchmark_create_tensor(self, value, dtype, device): """Benchmark overheads of creating a Tensor object.""" ctx = context.context() if device == GPU: # Warmup the GPU ops.EagerTensor(value, device=device) def func(): ops.EagerTensor(value, device=device, dtype=dtype) self._run(func, 30000) def _benchmark_create_constant(self, value, dtype): def func(): constant_op.constant(value, dtype=dtype) with ops.device("GPU:0" if context.num_gpus() else "CPU:0"): for _ in range(1000): func() # Warmup. self._run(func, 3000) def benchmark_create_float_constant(self): self._benchmark_create_constant(42.0, dtype=None) def benchmark_create_int32_constant(self): if context.num_gpus(): return # int32 constants are always allocated on CPU. self._benchmark_create_constant(42, dtype=dtypes.int32) def _benchmark_add_scalars(self, a, b): def func(): return memoryview(math_ops.add(a, b)) with ops.device("GPU:0" if context.num_gpus() else "CPU:0"): for _ in range(1000): func() # Warmup. 
self._run(func, 30000) def benchmark_add_float_scalars(self): self._benchmark_add_scalars(42.0, 24.0) def benchmark_add_int32_scalars(self): self._benchmark_add_scalars(42, 24) def benchmark_create_float_tensor_from_list_CPU(self): self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, CPU) def benchmark_create_float_tensor_from_np_array_CPU(self): self._benchmark_create_tensor( np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum, CPU) def benchmark_create_int32_tensor_from_list_CPU(self): self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, CPU) def benchmark_create_int32_tensor_from_np_array_CPU(self): self._benchmark_create_tensor( np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, CPU) def benchmark_create_float_tensor_from_list_GPU(self): if not context.num_gpus(): return self._benchmark_create_tensor([[3.0]], dtypes.float32.as_datatype_enum, GPU) def benchmark_create_float_tensor_from_np_array_GPU(self): if not context.num_gpus(): return self._benchmark_create_tensor( np.array([[3.0]], dtype=np.float32), dtypes.float32.as_datatype_enum, GPU) def benchmark_create_int32_tensor_from_list_GPU(self): # int32's are kept on host memory even when executing on GPU. if not context.num_gpus(): return self._benchmark_create_tensor([[3]], dtypes.int32.as_datatype_enum, GPU) def benchmark_create_int32_tensor_from_np_array_GPU(self): # int32's are kept on host memory even when executing on GPU. if not context.num_gpus(): return self._benchmark_create_tensor( np.array([[3]], dtype=np.int32), dtypes.int32.as_datatype_enum, GPU) def benchmark_index_tensor_with_literal(self): func = lambda: constant_op.constant([3.0])[0] self._run(func, 30000) def benchmark_index_tensor_with_tensor(self): func = lambda idx=constant_op.constant(0): constant_op.constant([3.0])[idx] self._run(func, 30000) def benchmark_index_tensor_with_np_array(self): func = lambda idx=np.array(0): constant_op.constant([3.0])[idx] self._run(func, 30000) def _benchmark_np_multiply(self, m, num_iters): a = m.cpu().numpy() func = lambda: a * a self._run(func, num_iters) def _benchmark_tf_multiply(self, m, num_iters): func = lambda: m * m self._run(func, num_iters) def _benchmark_tf_multiply_op(self, m, num_iters): func = lambda: math_ops.multiply(m, m) self._run(func, num_iters) def benchmark_np_multiply(self): self._benchmark_np_multiply(self._m_2, 30000) def benchmark_tf_multiply_CPU(self): with context.device(CPU): m = self._m_2.cpu() self._benchmark_tf_multiply(m, 30000) def benchmark_tf_multiply_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2.gpu() self._benchmark_tf_multiply(m, 30000) def benchmark_tf_multiply_op_CPU(self): with context.device(CPU): m = self._m_2.cpu() self._benchmark_tf_multiply_op(m, 30000) def benchmark_tf_multiply_op_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2.gpu() self._benchmark_tf_multiply_op(m, 30000) def benchmark_tf_identity(self): m = self._m_2 self._run(lambda: gen_array_ops.identity(m), 30000) def benchmark_slowpath_tf_identity(self): self._run(lambda: gen_array_ops.identity(1), 30000) def benchmark_tfe_py_execute_identity(self): m = self._m_2 ctx_handle = context.context()._handle attrs = ("T", self._m_2.dtype.as_datatype_enum) inputs = [m] def f(): pywrap_tensorflow.TFE_Py_Execute(ctx_handle, None, "Identity", inputs, attrs, 1) self._run(f, 30000) def benchmark_tf_gradient_function_identity(self): with context.device(CPU): m = gen_array_ops.identity(self._m_2) 
self._run( lambda: backprop.gradients_function(gen_array_ops.identity, [0])(m), 30000) def benchmark_tf_gradient_forward_identity(self): with backprop.GradientTape() as tape: m = self._m_2 tape.watch(m) self._run(lambda: gen_array_ops.identity(m), 30000) def benchmark_tf_gradient_tape_push_pop(self): def f(): with backprop.GradientTape(): pass self._run(f, 30000) def benchmark_tf_gradient_function_no_op(self): with context.device(CPU): m = gen_array_ops.identity(self._m_2) self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000) def _benchmark_np_matmul(self, m, transpose_b, num_iters): a = m.cpu().numpy() b = a.T if transpose_b else a func = lambda: np.dot(a, b) self._run(func, num_iters) def _benchmark_tf_matmul(self, m, transpose_b, num_iters, execution_mode=None): func = lambda: math_ops.matmul(m, m, transpose_b=transpose_b) self._run(func, num_iters, execution_mode=execution_mode) def _benchmark_gen_math_ops_matmul(self, m, transpose_b, num_iters): def func(): gen_math_ops.mat_mul(m, m, transpose_b=transpose_b) self._run(func, num_iters) def _benchmark_tfe_py_fastpath_execute_matmul(self, m, transpose_b, num_iters): def func(): c_tfe_py_fastpath_execute(m, m, transpose_b=transpose_b) self._run(func, num_iters) def _benchmark_tfe_py_execute_matmul(self, m, transpose_b, num_iters): inputs = [m, m] # pylint: disable=protected-access ctx_handle = context.context()._handle # pylint: enable=protected-access device = context.context().device_name attrs = ("transpose_a", False, "transpose_b", transpose_b, "T", m.dtype.as_datatype_enum) def func(): pywrap_tensorflow.TFE_Py_Execute(ctx_handle, device, "MatMul", inputs, attrs, 1) self._run(func, num_iters) def _benchmark_defun_matmul(self, m, transpose_b, num_iters, execution_mode=None): f = function.defun(math_ops.matmul) func = lambda: f(m, m, transpose_b=transpose_b) self._run(func, num_iters, execution_mode=execution_mode) def _benchmark_nested_defun_matmul(self, m, transpose_b, num_iters): inner = function.defun(math_ops.matmul) @function.defun def outer(a, b, c, transpose_b): return math_ops.matmul(inner(a, b, transpose_b=transpose_b), c) func = lambda: outer(m, m, m, transpose_b=transpose_b) # Warmup before benchmark for _ in range(1000): func() self._run(func, num_iters) def _benchmark_defun_matmul_forward_backward(self, m, transpose_b, num_iters, execution_mode=None): f = function.defun(math_ops.matmul) def func(): with backprop.GradientTape() as gt: gt.watch(m) y = f(m, m, transpose_b=transpose_b) _ = gt.gradient(y, m) self._run(func, num_iters, execution_mode=execution_mode) def _benchmark_read_variable(self, m, num_iters): self._run(m.value, num_iters) def _benchmark_matmul_read_variable(self, m, num_iters): self._benchmark_gen_math_ops_matmul( m, transpose_b=False, num_iters=num_iters) def _benchmark_matmul_read_variable_with_tape(self, m, num_iters): with backprop.GradientTape() as tape: tape.watch(m) self._benchmark_gen_math_ops_matmul( m, transpose_b=False, num_iters=num_iters) def _benchmark_read_variable_with_tape(self, m, num_iters): with backprop.GradientTape() as tape: tape.watch(m) self._run(m.value, num_iters) # Benchmarks for A^2, A of dimension 2 by 2. 
def benchmark_np_matmul_2_by_2(self): self._benchmark_np_matmul( self._m_2_by_2, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tf_matmul_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_tf_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tf_matmul_2_by_2_CPU_async(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_tf_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2, execution_mode=context.ASYNC) def benchmark_gen_math_ops_matmul_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_gen_math_ops_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tfe_py_fastpath_execute_matmul_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_tfe_py_fastpath_execute_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tfe_py_execute_matmul_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_tfe_py_execute_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_defun_matmul_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_defun_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_defun_matmul_2_by_2_CPU_async(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_defun_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2, execution_mode=context.ASYNC) def benchmark_defun_matmul_forward_backward_2_by_2_CPU(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_defun_matmul_forward_backward( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_defun_matmul_forward_backward_2_by_2_CPU_async(self): with context.device(CPU): m = self._m_2_by_2.cpu() self._benchmark_defun_matmul_forward_backward( m, transpose_b=False, num_iters=self._num_iters_2_by_2, execution_mode=context.ASYNC) def benchmark_tf_matmul_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_tf_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tf_matmul_2_by_2_GPU_async(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_tf_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2, execution_mode=context.ASYNC) def benchmark_gen_math_ops_matmul_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_gen_math_ops_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_tfe_py_execute_matmul_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_tfe_py_execute_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_defun_matmul_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_defun_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2) def benchmark_defun_matmul_2_by_2_GPU_async(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_2_by_2.gpu() self._benchmark_defun_matmul( m, transpose_b=False, num_iters=self._num_iters_2_by_2, execution_mode=context.ASYNC) def benchmark_nested_defun_matmul_2_by_2(self): m = self._m_2_by_2.cpu() self._benchmark_nested_defun_matmul( m, transpose_b=False, 
num_iters=self._num_iters_2_by_2) # Benchmarks for AA.T, A of dimension 100 by 784. def benchmark_np_matmul_100_by_784(self): self._benchmark_np_matmul( self._m_100_by_784, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tf_matmul_100_by_784_CPU(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_tf_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tf_matmul_100_by_784_CPU_async(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_tf_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784, execution_mode=context.ASYNC) def benchmark_gen_math_ops_matmul_100_by_784_CPU(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_gen_math_ops_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tfe_py_fastpath_execute_matmul_100_by_784_CPU(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_tfe_py_fastpath_execute_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tfe_py_execute_matmul_100_by_784_CPU(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_tfe_py_execute_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_defun_matmul_100_by_784_CPU(self): with context.device(CPU): m = self._m_100_by_784.cpu() self._benchmark_defun_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tf_matmul_100_by_784_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_100_by_784.gpu() self._benchmark_tf_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tf_matmul_100_by_784_GPU_async(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_100_by_784.gpu() self._benchmark_tf_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784, execution_mode=context.ASYNC) def benchmark_gen_math_ops_matmul_100_by_784_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_100_by_784.gpu() self._benchmark_gen_math_ops_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_tfe_py_execute_matmul_100_by_784_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_100_by_784.gpu() self._benchmark_tfe_py_execute_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_defun_matmul_100_by_784_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = self._m_100_by_784.gpu() self._benchmark_defun_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def benchmark_nested_defun_matmul_100_by_784(self): m = self._m_100_by_784.gpu() self._benchmark_nested_defun_matmul( m, transpose_b=True, num_iters=self._num_iters_100_by_784) def _benchmark_forwardprop_matmul_CPU(self, shape): with ops.device(CPU): m = random_ops.random_uniform(shape).cpu() tangent = random_ops.random_uniform(shape).cpu() def func(): with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(m, tangent) result = math_ops.matmul(m, m, transpose_b=True) return result, acc.jvp(result) # Warmup before benchmark for _ in range(100): func() self._run(func, 3000) def _benchmark_forwardprop_in_defun_matmul_CPU(self, shape): with ops.device(CPU): @def_function.function def compiled_function(x, tangent): with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(x, tangent) result = math_ops.matmul(x, x, transpose_b=True) return result, acc.jvp(result) 
m = random_ops.random_uniform(shape).cpu() tangent = random_ops.random_uniform(shape).cpu() func = lambda: compiled_function(m, tangent) # Warmup before benchmark for _ in range(100): func() self._run(func, 3000) def _benchmark_forwardprop_in_defun_of_defun_matmul_CPU(self, shape): with ops.device(CPU): matmul = def_function.function(math_ops.matmul) @def_function.function() def compiled_function(x, tangent): with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(x, tangent) result = matmul(x, x, transpose_b=True) return result, acc.jvp(result) m = random_ops.random_uniform(shape).cpu() tangent = random_ops.random_uniform(shape).cpu() func = lambda: compiled_function(m, tangent) # Warmup before benchmark for _ in range(100): func() self._run(func, 3000) def _benchmark_forwardprop_of_defun_matmul_CPU(self, shape): with ops.device(CPU): m = random_ops.random_uniform(shape).cpu() tangent = random_ops.random_uniform(shape).cpu() matmul = def_function.function(math_ops.matmul) def func(): with forwardprop.ForwardGradientAccumulator() as acc: acc.watch(m, tangent) result = matmul(m, m, transpose_b=True) return result, acc.jvp(result) # Warmup before benchmark for _ in range(100): func() self._run(func, 3000) def benchmark_forwardprop_matmul_256_by_2096_CPU(self): self._benchmark_forwardprop_matmul_CPU(shape=(256, 2096)) def benchmark_forwardprop_in_defun_matmul_256_by_2096_CPU(self): self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(256, 2096)) def benchmark_forwardprop_in_defun_of_defun_matmul_256_by_2096_CPU(self): self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(256, 2096)) def benchmark_forwardprop_of_defun_matmul_256_by_2096_CPU(self): self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(256, 2096)) def benchmark_forwardprop_matmul_100_by_784_CPU(self): self._benchmark_forwardprop_matmul_CPU(shape=(100, 784)) def benchmark_forwardprop_in_defun_matmul_100_by_784_CPU(self): self._benchmark_forwardprop_in_defun_matmul_CPU(shape=(100, 784)) def benchmark_forwardprop_in_defun_of_defun_matmul_100_by_784_CPU(self): self._benchmark_forwardprop_in_defun_of_defun_matmul_CPU(shape=(100, 784)) def benchmark_forwardprop_of_defun_matmul_100_by_784_CPU(self): self._benchmark_forwardprop_of_defun_matmul_CPU(shape=(100, 784)) def benchmark_defun_without_signature(self): def func(t1, t2, t3, t4, t5, t6, t7, t8): del t1, t2, t3, t4, t5, t6, t7, t8 return None defined = function.defun(func) t = constant_op.constant(0.0) cache_computation = lambda: defined(t, t, t, t, t, t, t, t) self._run(cache_computation, 30000) def benchmark_defun_without_signature_and_with_kwargs(self): def func(t1, t2, t3, t4, t5, t6, t7, t8): del t1, t2, t3, t4, t5, t6, t7, t8 return None defined = function.defun(func) t = constant_op.constant(0.0) def cache_computation(): return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t) self._run(cache_computation, 30000) def benchmark_defun_with_signature(self): def func(t1, t2, t3, t4, t5, t6, t7, t8): del t1, t2, t3, t4, t5, t6, t7, t8 return None defined = function.defun( func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8) t = constant_op.constant(0.0) signature_computation = lambda: defined(t, t, t, t, t, t, t, t) self._run(signature_computation, 30000) def benchmark_defun_with_signature_and_kwargs(self): def func(t1, t2, t3, t4, t5, t6, t7, t8): del t1, t2, t3, t4, t5, t6, t7, t8 return None defined = function.defun( func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8) t = constant_op.constant(0.0) def 
signature_computation(): return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t) self._run(signature_computation, 30000) def benchmark_matmul_read_variable_op_2_by_2_CPU(self): with context.device(CPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2) self._benchmark_matmul_read_variable(m, num_iters=self._num_iters_2_by_2) def benchmark_matmul_read_variable_op_with_tape_2_by_2_CPU(self): with context.device(CPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2) self._benchmark_matmul_read_variable_with_tape( m, num_iters=self._num_iters_2_by_2) def benchmark_read_variable_op_2_by_2_CPU(self): with context.device(CPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2) self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2) def benchmark_read_variable_op_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu()) self._benchmark_read_variable(m, num_iters=self._num_iters_2_by_2) def benchmark_read_variable_op_with_tape_2_by_2_CPU(self): with context.device(CPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2) self._benchmark_read_variable_with_tape( m, num_iters=self._num_iters_2_by_2) def benchmark_read_variable_op_with_tape_2_by_2_GPU(self): if not context.num_gpus(): return with context.device(GPU): m = resource_variable_ops.ResourceVariable(self._m_2_by_2.gpu()) self._benchmark_read_variable_with_tape( m, num_iters=self._num_iters_2_by_2) def benchmark_keras_model_subclassed(self): model = SubclassedKerasModel() data = random_ops.random_uniform((10, 10)) func = lambda: model(data) # First call is more expensive (creates variables etc.), discount that. func() # The whole point of this test is to contrast subclassing with # the functional style of keras model building, so validate that # the models are equivalent. assert np.equal(func(), make_keras_model()(data)).all() self._run(func, 30000) def benchmark_keras_model_functional(self): model = make_keras_model() data = random_ops.random_uniform((10, 10)) func = lambda: model(data) # Symmetry with benchmark_keras_model_subclassed func() assert np.equal(func(), SubclassedKerasModel()(data)).all() self._run(func, 30000) def benchmark_keras_model_sequential(self): model = make_sequential_keras_model() data = random_ops.random_uniform((10, 10)) func = lambda: model(data) # Symmetry with benchmark_keras_model_functional func() assert np.equal(func(), make_keras_model()(data)).all() self._run(func, 30000) def _benchmark_keras_model_fit(self, model, run_eagerly=False): data = random_ops.random_uniform((10, 10), minval=-1, maxval=1) labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1) dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat() model.compile( gradient_descent.GradientDescentOptimizer(learning_rate=0.001), loss="mse", run_eagerly=run_eagerly) func = lambda: model.fit(dataset, epochs=1, steps_per_epoch=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. 
model.fit(dataset, epochs=1, steps_per_epoch=1, verbose=0) self._run(func, 1) def _benchmark_keras_model_evaluate(self, model, run_eagerly=False): data = random_ops.random_uniform((10, 10), minval=-1, maxval=1) labels = random_ops.random_uniform((10, 10), minval=-1, maxval=1) dataset = dataset_ops.Dataset.from_tensors((data, labels)).repeat() model.compile( gradient_descent.GradientDescentOptimizer(learning_rate=0.001), loss="mse", run_eagerly=run_eagerly) func = lambda: model.evaluate(dataset, steps=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. model.evaluate(dataset, steps=1, verbose=0) self._run(func, 1) def _benchmark_keras_model_predict(self, model, run_eagerly=False): data = random_ops.random_uniform((10, 10), minval=-1, maxval=1) dataset = dataset_ops.Dataset.from_tensors(tuple([data])).repeat() model.compile( gradient_descent.GradientDescentOptimizer(learning_rate=0.001), loss="mse", run_eagerly=run_eagerly) func = lambda: model.predict(dataset, steps=1000, verbose=0) # First call is more expensive (creates variables etc.), discount that. model.predict(dataset, steps=1, verbose=0) self._run(func, 1) def benchmark_keras_model_subclassed_fit(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_subclassed_fit_graph_mode(self): with context.graph_mode(): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_subclassed_fit_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_functional_fit(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_functional_fit_graph_mode(self): with context.graph_mode(): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_functional_fit_graph_mode_with_profiler(self): profiler.start() with context.graph_mode(): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) result = profiler.stop() assert result is not None def benchmark_keras_model_functional_fit_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_functional_fit_run_model_eagerly_with_profiler( self): profiler.start() model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) result = profiler.stop() assert result is not None def benchmark_keras_model_sequential_fit(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_sequential_fit_graph_mode(self): with context.graph_mode(): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model) def benchmark_keras_model_sequential_fit_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_fit(model, run_eagerly=True) def benchmark_keras_model_subclassed_evaluate(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_subclassed_evaluate_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") 
self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_functional_evaluate(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_functional_evaluate_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_sequential_evaluate(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model) def benchmark_keras_model_sequential_evaluate_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_evaluate(model, run_eagerly=True) def benchmark_keras_model_subclassed_predict(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_subclassed_predict_run_model_eagerly(self): model = SubclassedKerasModel(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) def benchmark_keras_model_functional_predict(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_functional_predict_run_model_eagerly(self): model = make_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) def benchmark_keras_model_sequential_predict(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model) def benchmark_keras_model_sequential_predict_run_model_eagerly(self): model = make_sequential_keras_model(initializer="glorot_uniform") self._benchmark_keras_model_predict(model, run_eagerly=True) def benchmarkScan(self): elems = math_ops.range(1600) def scan(): return functional_ops.scan( lambda a, x: a + x, elems, parallel_iterations=1) self._run(scan, 100) def benchmarkScanDefun(self): elems = math_ops.range(1600) @function.defun def scan(): return functional_ops.scan( lambda a, x: a + x, elems, parallel_iterations=1) self._run(scan, 100) def benchmark_fastpath_conversion_type_inference(self): c = constant_op.constant(1., dtype=dtypes.float32) def fn(): return gen_math_ops.add(c, 1) self._run(fn, 10000) def benchmark_convert_3x_list_to_tensor(self): xs = [1, 2, 3] self._run(lambda: ops.convert_to_tensor(xs), 1000) def benchmark_convert_3x_array_to_tensor(self): xs = np.array([1, 2, 3], dtype=np.int32) self._run(lambda: ops.convert_to_tensor(xs), 1000) def benchmark_constant_40x2_list_to_tensor(self): xs = [[0] * 2] * 40 self._run(lambda: constant_op.constant(xs), 1000) def benchmark_constant_40x2_array_to_tensor(self): xs = np.array([[0] * 2] * 40, dtype=np.int32) self._run(lambda: constant_op.constant(xs), 1000) def benchmark_constant_40x_list_of_2x_arrays_to_tensor(self): xs = [np.array([0] * 2, dtype=np.int32)] * 40 self._run(lambda: constant_op.constant(xs), 1000) def _benchmarkFunctionWithResourceInputs(self, num_resources, num_iters): @def_function.function def add_all(*args): return math_ops.add_n(*args) with context.device(CPU): resources = [] for _ in range(num_resources): resources.append(resource_variable_ops.ResourceVariable(self._m_2)) self._run(lambda: add_all(resources), num_iters) def benchmarkFunctionWithFiveResourceInputs(self): self._benchmarkFunctionWithResourceInputs(5, 1000) def benchmarkFunctionWithFiveHundredResourceInputs(self): self._benchmarkFunctionWithResourceInputs(500, 100) class 
RemoteWorkerMicroBenchmarks(test.Benchmark): def __init__(self): # used for remote benchmarks os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1" self._cached_server1 = server_lib.Server.create_local_server() self._cached_server_target1 = self._cached_server1.target[len("grpc://"):] self._cached_server2 = server_lib.Server.create_local_server() self._cached_server_target2 = self._cached_server2.target[len("grpc://"):] def _run(self, func, num_iters=10000, execution_mode=None): total_time = run_benchmark(func, num_iters, execution_mode) mean_us = total_time * 1e6 / num_iters self.report_benchmark( iters=num_iters, wall_time=mean_us, extras={"examples_per_sec": num_iters / total_time}) def benchmark_send_mirroring_off(self): remote.connect_to_remote_host(self._cached_server_target1) x = random_ops.random_uniform((2, 2)).cpu() @def_function.function def remote_func(m): return math_ops.matmul(m, m) def func(m): with ops.device("job:worker/replica:0/task:0/device:CPU:0"): return remote_func(m) context.context().mirroring_policy = context.MIRRORING_NONE self._run(lambda: func(x)) # NOTE(b/136184459): Force garbage collecting hanging resources before # subsequent calls to set_server_def, to ensure the destroy resource ops are # executed when their corresponding device and manager are still available. gc.collect() def benchmark_send_mirroring_on(self): remote.connect_to_remote_host(self._cached_server_target1) x = random_ops.random_uniform((2, 2)).cpu() @def_function.function def remote_func(m): return math_ops.matmul(m, m) def func(m): with ops.device("job:worker/replica:0/task:0/device:CPU:0"): return remote_func(m) context.context().mirroring_policy = context.MIRRORING_ALL self._run(lambda: func(x)) # NOTE(b/136184459): Force garbage collecting hanging resources before # subsequent calls to set_server_def, to ensure the destroy resource ops are # executed when their corresponding device and manager are still available. gc.collect() def benchmark_worker_mirroring_off(self): remote.connect_to_remote_host( [self._cached_server_target1, self._cached_server_target2]) with ops.device("job:worker/replica:0/task:1/device:CPU:0"): v = variables.Variable(1.0) @def_function.function def remote_func(): return 1.0 + v def func(): with ops.device("job:worker/replica:0/task:0/device:CPU:0"): return remote_func() context.context().mirroring_policy = context.MIRRORING_NONE self._run(func) # NOTE(b/136184459): Force garbage collecting hanging resources before # subsequent calls to set_server_def, to ensure the destroy resource ops are # executed when their corresponding device and manager are still available. gc.collect() def benchmark_worker_mirroring_on(self): remote.connect_to_remote_host( [self._cached_server_target1, self._cached_server_target2]) with ops.device("job:worker/replica:0/task:1/device:CPU:0"): v = variables.Variable(1.0) @def_function.function def remote_func(): return 1.0 + v def func(): with ops.device("job:worker/replica:0/task:0/device:CPU:0"): return remote_func() context.context().mirroring_policy = context.MIRRORING_ALL self._run(func) # NOTE(b/136184459): Force garbage collecting hanging resources before # subsequent calls to set_server_def, to ensure the destroy resource ops are # executed when their corresponding device and manager are still available. gc.collect() if __name__ == "__main__": test.main()
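# An illustrative sketch of the timing discipline used throughout this
# file: one untimed warmup call, then a tight loop, reported as the mean
# wall time per iteration in microseconds. The helper name is
# hypothetical; unlike run_benchmark it omits async-executor draining.
def _example_mean_walltime_us(func, num_iters=1000):
  func()  # Warmup: the first call may build kernels and other state.
  start = time.time()
  for _ in xrange(num_iters):
    func()
  return (time.time() - start) * 1e6 / num_iters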
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/benchmarks_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unidiomatic-typecheck """Prototype decorator for defining graph functions with eager semantics.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import weakref from tensorflow.python.eager import context from tensorflow.python.eager import function as function_lib from tensorflow.python.eager import lift_to_graph from tensorflow.python.framework import func_graph as func_graph_module from tensorflow.python.framework import ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training.tracking import base as trackable from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util import tf_decorator from tensorflow.python.util.tf_export import tf_export class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable): """Variable which does not lift its initializer out of function context. Instances of this variable, when created, build a graph which runs their initializer inside a tf.cond(is_initialized) block. This can only be created inside a defun called from (eventually) eager mode. That is, non-function-building graphs are not supported. """ def __init__(self, initial_value=None, trainable=None, caching_device=None, name=None, dtype=None, constraint=None, add_initializers_to=None, lifted_initializer_graph=None, synchronization=None, aggregation=None, shape=None, **unused_kwargs): """Creates a variable. Args: initial_value: A `Tensor`, or Python object convertible to a `Tensor`, which is the initial value for the Variable. The initial value must have a shape specified unless `validate_shape` is set to False. Can also be a callable with no argument that returns the initial value when called. (Note that initializer functions from init_ops.py must first be bound to a shape before being used here.) trainable: If `True`, GradientTapes automatically watch uses of this Variable. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. If not `None`, caches on another device. Typical use is to cache on the device where the Ops using the Variable reside, to deduplicate copying through `Switch` and other conditional statements. name: Optional name for the variable. Defaults to `'Variable'` and gets uniquified automatically. dtype: If set, initial_value will be converted to the given type. If None, either the datatype will be kept (if initial_value is a Tensor) or float32 will be used (if it is a Python object convertible to a Tensor). 
constraint: An optional projection function to be applied to the variable after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected Tensor representing the value of the variable and return the Tensor for the projected value (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. add_initializers_to: if not None and not in legacy graph mode, the initializer tensor will be added to this map in addition to adding the assignment to the function. lifted_initializer_graph: FuncGraph to try to lift initializers to. synchronization: Indicates when a distributed a variable will be aggregated. Accepted values are constants defined in the class `tf.VariableSynchronization`. By default the synchronization is set to `AUTO` and the current `DistributionStrategy` chooses when to synchronize. aggregation: Indicates how a distributed variable will be aggregated. Accepted values are constants defined in the class `tf.VariableAggregation`. shape: (optional) The shape of this variable. If None, the shape of `initial_value` will be used. When setting this argument to `tf.TensorShape(None)` (representing an unspecified shape), the variable can be assigned with values of different shapes. Raises: ValueError: If the initial value is not specified, or does not have a shape and `validate_shape` is `True`. RuntimeError: If called outside of a function definition. """ with ops.init_scope(): self._in_graph_mode = not context.executing_eagerly() if not ops.inside_function(): # If we've been init_scope()d out of the function definition nothing to do # here; we can't really do the capturing or conditional logic. resource_variable_ops.ResourceVariable.__init__( self, initial_value=initial_value, trainable=trainable, caching_device=caching_device, name=name, dtype=dtype, constraint=constraint) return if initial_value is None: raise ValueError("initial_value must be specified.") init_from_fn = callable(initial_value) if constraint is not None and not callable(constraint): raise ValueError("The `constraint` argument must be a callable.") if isinstance(initial_value, trackable.CheckpointInitialValue): self._maybe_initialize_trackable() self._update_uid = initial_value.checkpoint_position.restore_uid initial_value = initial_value.wrapped_value with ops.name_scope(name, "Variable", [] if init_from_fn else [initial_value]) as scope_name: with ops.name_scope("Initializer"), ops.device(None): initial_value = ops.convert_to_tensor( initial_value() if init_from_fn else initial_value, name="initial_value", dtype=dtype) assert initial_value is not None # Don't use `shape or initial_value.shape` since TensorShape has # overridden `__bool__`. if shape is None: shape = initial_value.shape # Use the constructor for UninitializedVariable to start. Outside the name # scope so we don't double up the prefix. 
    super(UnliftedInitializerVariable, self).__init__(
        trainable=trainable,
        caching_device=caching_device,
        name=name,
        shape=shape,
        dtype=initial_value.dtype,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation,
        extra_handle_data=initial_value,
        **unused_kwargs)

    with ops.name_scope(scope_name):
      if self._in_graph_mode:
        with ops.init_scope():
          outer_graph = ops.get_default_graph()
        func_graph = ops.get_default_graph()
        function_placeholders = (
            func_graph.inputs + func_graph.internal_captures)
        placeholder_ops = set(
            [tensor.op for tensor in function_placeholders])
        lifted_initializer = lift_to_graph.lift_to_graph(
            [initial_value], outer_graph,
            disallowed_placeholders=placeholder_ops)[initial_value]
        with ops.init_scope():
          self._initial_value = lifted_initializer
          with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                resource_variable_ops.var_is_initialized_op(self._handle))
          if initial_value is not None:
            with ops.name_scope("Assign") as n, ops.colocate_with(
                self._handle):
              self._initializer_op = resource_variable_ops.assign_variable_op(
                  self._handle, lifted_initializer, name=n)
      elif context.executing_eagerly():
        # In this case, both current scope and init scope are eager.
        # Assign_variable_op will be executed immediately. So we don't need to
        # add it to "add_initializers_to" to lift it out.
        with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
          resource_variable_ops.assign_variable_op(
              self._handle, initial_value, name=n)
      else:
        # Init scope is eager but current scope is graph. We will lift out
        # this variable by adding it into "add_initializers_to".
        if add_initializers_to is not None:
          add_initializers_to[self] = initial_value

        def assign_fn():
          with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
            resource_variable_ops.assign_variable_op(
                self._handle, initial_value, name=n)
            # Returning values to keep tf.cond happy.
          return ops.convert_to_tensor(1)

        def not_assign_fn():
          return ops.convert_to_tensor(0)

        # Note: this cond is always guaranteed to run because we're inside a
        # defun which will insert automatic control dependencies. It will only
        # execute assign_fn if lifting failed.
        control_flow_ops.cond(
            resource_variable_ops.var_is_initialized_op(self._handle),
            not_assign_fn, assign_fn)


RUN_FUNCTIONS_EAGERLY = False


@tf_export("config.experimental_run_functions_eagerly")
def run_functions_eagerly(run_eagerly):
  """Enables / disables eager execution of `tf.function`s.

  After calling `tf.config.experimental_run_functions_eagerly(True)` all
  invocations of tf.function will run eagerly instead of running through a
  graph function. This can be useful for debugging or profiling.

  Similarly, calling `tf.config.experimental_run_functions_eagerly(False)`
  will revert the behavior of all functions to graph functions.

  Args:
    run_eagerly: Boolean. Whether to run functions eagerly.
  """
  global RUN_FUNCTIONS_EAGERLY
  RUN_FUNCTIONS_EAGERLY = bool(run_eagerly)


class FunctionDeleter(object):

  def __init__(self, func_graph):
    self.func_graph = func_graph

  def __del__(self):
    try:
      func_graph_module.dismantle_func_graph(self.func_graph)
    except:  # pylint: disable=bare-except
      # Note: bare except here because this can be noisy at shutdown time.
      pass


class Function(object):
  """Wrapper class for the graph functions defined for a Python function.

  See the documentation for `tf.function` for more information on the
  semantics of defined functions.

  `Function` is thread-compatible.
""" def __init__(self, python_function, name, input_signature=None, autograph=True, experimental_autograph_options=None, experimental_relax_shapes=False, experimental_compile=None): """Initializes a `Function`. Args: python_function: the function to be wrapped. name: the name given to it. input_signature: a possibly nested sequence of `TensorSpec` objects specifying the input signature of this function. If `None`, a separate function is instantiated for each inferred input signature. autograph: whether `python_function` should be converted to graph mode. See https://www.tensorflow.org/guide/autograph for more information. experimental_autograph_options: optional tuple of tensorflow.autograph.Feature values. Allows enabling additional conversion options when autograph is set to True. experimental_relax_shapes: When true, argument shapes may be relaxed to avoid unecessary retracing. experimental_compile: If false, execute the function in a regular way. The function is optimized by some graph rewrite passes (some ops might be clustered into a single op) and interpreted by the standard TensorFlow executor, which dispatches op kernels one by one as they become executable. Set it to false when directly running a multi-device function on TPUs (e.g. two TPU cores, one TPU core and its host CPU). If True, the function is compiled directly by XLA. XLA would fuse all the ops and emit more efficient code to run for some devices (e.g. TPU, XLA_GPU) and some use cases (e.g. dense tensor computation). It requires that the whole function is compilable by XLA. If None (default), compile the function with XLA when running on TPU and go through the regular function execution path when running on other devices. Raises: ValueError: if `input_signature` is not None and the `python_function`'s argspec has keyword arguments. """ self._python_function = python_function self._function_spec = function_lib.FunctionSpec.from_function_and_signature( python_function, input_signature) self._autograph = autograph self._experimental_autograph_options = experimental_autograph_options self.experimental_relax_shapes = experimental_relax_shapes self._experimental_compile = experimental_compile self._created_variables = None self._stateful_fn = None self._stateless_fn = None self._descriptor_cache = weakref.WeakKeyDictionary() self._name = name def _defun_with_scope(self, scope): """Creates a defun wrapped inside a variable creator scope.""" weak_wrapped_fn = None def wrapped_fn(*args, **kwds): """Wraps `self._python_function` in a variable creator scope.""" # We register a variable creator with reduced priority. If an outer # variable creator is just modifying keyword arguments to the variable # constructor, this will work harmoniously. Since the `scope` registered # here actually creates the variable, it taking priority would otherwise # ignore the outer creator. # # If an outer variable creator calls the variable constructor manually, # for example creating a MirroredVariable, then they won't call our # creator. This means we won't be able to trace the initialization graph, # and so variable initializers can't depend on function arguments. This is # better than the alternative, tracing the initialization graph but giving # the user a variable type they didn't want. with ops.get_default_graph()._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access # __wrapped__ allows AutoGraph to swap in a converted function. We give # the function a weak reference to itself to avoid a reference cycle. 
return weak_wrapped_fn().__wrapped__(*args, **kwds) weak_wrapped_fn = weakref.ref(wrapped_fn) return self._defun(tf_decorator.make_decorator( self._python_function, wrapped_fn)) def _defun(self, fn): """Returns a defun generated from the input function.""" attributes = None if self._experimental_compile is not None: if self._experimental_compile: attributes = {"_XlaCompile": True} else: attributes = {"_XlaCompile": False} return function_lib.defun_with_attributes( fn, input_signature=self.input_signature, attributes=attributes, autograph=self._autograph, experimental_autograph_options=self._experimental_autograph_options, experimental_relax_shapes=self.experimental_relax_shapes) def _initialize(self, args, kwds, add_initializers_to=None): """Initializes, on the first call. Creates two `Function`s, one that will allow creation of variables and one that won't. Additionally runs a trace for the `Function` that allows creation of variables. Args: args: Arguments to the underlying python callable. kwds: Keyword arguments to the python callable. add_initializers_to: Where to collect variable initializers, if not None. """ created_variables = [] lifted_initializer_graph = func_graph_module.FuncGraph("initializer") def variable_capturing_scope(unused_next_creator, **kwds): """Creates UnliftedInitializerVariables and saves references to them.""" v = UnliftedInitializerVariable( add_initializers_to=add_initializers_to, lifted_initializer_graph=lifted_initializer_graph, **kwds) created_variables.append(weakref.ref(v)) return v self._created_variables = created_variables self._stateful_fn = self._defun_with_scope(variable_capturing_scope) self._stateful_fn._name = self._name # pylint: disable=protected-access # Force the definition of the function for these arguments self._lifted_initializer_graph = lifted_initializer_graph self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph) self._concrete_stateful_fn = ( self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access *args, **kwds)) def invalid_creator_scope(*unused_args, **unused_kwds): """Disables variable creation.""" raise ValueError( "tf.function-decorated function tried to create " "variables on non-first call.") self._stateless_fn = self._defun_with_scope(invalid_creator_scope) self._stateless_fn._name = self._name # pylint: disable=protected-access def _decorate(self, decorator): """Allows the captured Python function to be decorated in place. This method is only safe to call when the Function has not been called by a user. It makes sense to use this method to push a decorator into the function rather than wrapping the function in the decorator. We use this in tf.Module to allow user annotated `tf.functions` to remain as `Function` objects but still automatically enter the Module name_scope when they are evaluated like all other methods. Args: decorator: A callable accepting a single argument which is the function to decorate and returning a callable result. Raises: ValueError: If the function has been called a ValueError is raised. 
""" if self._stateful_fn is not None or self._stateless_fn is not None: raise ValueError( "Functions cannot be decorated after they have been traced.") self._python_function = decorator(self._python_function) self._function_spec = function_lib.FunctionSpec.from_function_and_signature( self._python_function, self.input_signature) def __call__(self, *args, **kwds): """Calls the graph function.""" context.ensure_initialized() if RUN_FUNCTIONS_EAGERLY: return self._python_function(*args, **kwds) if self._created_variables: # In this case we have created variables on the first call, so we run the # defunned version which is guaranteed to never create variables. return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable elif self._stateful_fn is not None: # In this case we have not created variables on the first call. So we can # run the first trace but we should fail if variables are created. results = self._stateful_fn(*args, **kwds) if self._created_variables: raise ValueError("Creating variables on a non-first call to a function" " decorated with tf.function.") return results # This is the first call of __call__, so we have to initialize. initializer_map = object_identity.ObjectIdentityDictionary() self._initialize(args, kwds, add_initializers_to=initializer_map) if self._created_variables: try: # Attempt to initialize variables eagerly and without conds by lifting # out initialization graphs. This is the only initialization strategy # compatible with XLA at the moment. self._initialize_uninitialized_variables(initializer_map) except lift_to_graph.UnliftableError: pass # Fall through to cond-based initialization. else: # Lifting succeeded, so variables are initialized and we can run the # stateless function. return self._stateless_fn(*args, **kwds) else: canon_args, canon_kwds = \ self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access *args, **kwds) # If we did not create any variables the trace we have is good enough. return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds) # pylint: disable=protected-access def fn_with_cond(*inner_args, **inner_kwds): """Conditionally runs initialization if it's needed.""" condition = True for wr in self._created_variables: variable = wr() if variable is None: raise ValueError( "A tf.Variable created inside your tf.function has been" " garbage-collected. Your code needs to keep Python references" " to variables created inside `tf.function`s.\n" "\n" "A common way to raise this error is to create and return a" " variable only referenced inside your function:\n" "\n" "@tf.function\n" "def f():\n" " v = tf.Variable(1.0)\n" " return v\n" "\n" "v = f() # Crashes with this error message!\n" "\n" "The reason this crashes is that @tf.function annotated" " function returns a **`tf.Tensor`** with the **value** of the" " variable when the function is called rather than the" " variable instance itself. As such there is no code holding a" " reference to the `v` created inside the function and Python" " garbage collects it.\n" "\n" "The simplest way to fix this issue is to create variables" " outside the function and capture them:\n" "\n" "v = tf.Variable(1.0)\n" "\n" "@tf.function\n" "def f():\n" " return v\n" "\n" "f() # <tf.Tensor: ... numpy=1.>\n" "v.assign_add(1.)\n" "f() # <tf.Tensor: ... 
numpy=2.>") condition = math_ops.logical_and( condition, resource_variable_ops.var_is_initialized_op( variable.handle)) # We want to call stateless_fn if possible because it avoids recomputing # potentially expensive initializers. return control_flow_ops.cond( condition, lambda: self._stateless_fn(*inner_args, **inner_kwds), functools.partial(self._concrete_stateful_fn._filtered_call, # pylint: disable=protected-access inner_args, inner_kwds)) # We've created variables and are unable to lift the initialization graphs, # so we fall back to initializing with conds while running the function. canon_args, canon_kwds = \ self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access *args, **kwds) return function_lib.defun(fn_with_cond)(*canon_args, **canon_kwds) @property def python_function(self): """The python function wrapped in this tf.function.""" return self._python_function @property def input_signature(self): return self._function_spec.input_signature @property def function_spec(self): return self._function_spec def _initialize_uninitialized_variables(self, initializer_map): """Make and call a `ConcreteFunction` which initializes variables.""" # Note: using defun here avoids an infinite recursion. @function_lib.defun def initialize_variables(): op_map = object_identity.ObjectIdentityDictionary() for v, init in initializer_map.items(): with ops.init_scope(): if resource_variable_ops.var_is_initialized_op(v.handle): # Ignore variables which are already initialized at trace time. continue op_map = lift_to_graph.lift_to_graph( [init], ops.get_default_graph(), op_map=op_map) v.assign(op_map[init]) with ops.init_scope(): return initialize_variables.get_concrete_function()() def get_initialization_function(self, *args, **kwargs): """Returns a `ConcreteFunction` which initializes this function's variables. Requires that this function hasn't been accessed yet through either calling it or calling get_concrete_function. Fails if we cannot build an initializer function which does not depend on the concrete values of the inputs to this function. Note that running this function will overwrite any values currently assigned to variables, for example restores from a checkpoint. Args: *args: arguments to the underlying python callable. **kwargs: keyword arguments to the python callable. Returns: A `ConcreteFunction` object which initializes the variables of this function. Raises: RuntimeError: if called after the variables have been initialized. """ if self._stateful_fn is not None: raise RuntimeError( "get_initialization_function cannot be called after the function " "has been used") # Here we trace the function, collect the initializers, and attempt to # extract them and run them eagerly. Fail only if we cannot do so. initializer_map = object_identity.ObjectIdentityDictionary() self._initialize(args, kwargs, add_initializers_to=initializer_map) # Note: using defun here avoids an infinite recursion. @function_lib.defun def initialize_variables(): for v, init in initializer_map.items(): v.assign(lift_to_graph.lift_to_graph( [init], ops.get_default_graph())[init]) return initialize_variables.get_concrete_function() def _list_all_concrete_functions_for_serialization(self): """Returns all concrete functions for serialization. Returns: A list of instances of `Function`. 
""" if self.input_signature is not None: self.get_concrete_function() concrete_functions = [] # pylint: disable=protected-access if self._stateful_fn: concrete_functions.extend( self._stateful_fn._function_cache.all_values()) if self._stateless_fn: concrete_functions.extend( self._stateless_fn._function_cache.all_values()) # pylint: enable=protected-access seen_signatures = [] for concrete_function in concrete_functions: signature = concrete_function.structured_input_signature flattened = nest.flatten(signature) if any( isinstance(arg, func_graph_module.UnknownArgument) for arg in flattened): logging.info("Unsupported signature for serialization: %s.", signature) continue equal_to_signature = functools.partial( function_lib.is_same_structure, signature, check_values=True) if not any(equal_to_signature(s) for s in seen_signatures): seen_signatures.append(signature) # Re-create concrete functions for these signatures. Re-creating ensures # that if the cache key has changed, the function will be traced again. concrete_functions = [] for args, kwargs in seen_signatures: concrete_functions.append(self.get_concrete_function(*args, **kwargs)) return concrete_functions def get_concrete_function(self, *args, **kwargs): """Returns a `ConcreteFunction` specialized to inputs and execution context. If this `Function` was created with an `input_signature`, `args` and `kwargs` may be omitted. With an input signature there is only one concrete function associated with this `Function`. If there is no fixed `input_signature` associated with this `Function`, positional and keyword arguments to `get_concrete_function` follow the same rules as input signature specification, with `tf.TensorSpec` objects describing `tf.Tensor`s which will be passed to the concrete function. Each `tf.Tensor` argument to the concrete function must have a unique name, either because it is the only one associated with a named argument of the Python function or because an explicit `name=` was passed to its `tf.TensorSpec` object. These names become the argument names for the concrete function. Arguments to the concrete function may always be specified as keyword arguments, naming the Tensor input. Positional arguments may be used instead when each preceding argument to the Python function is a Tensor. ```python @tf.function def f(x): return x f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float64)) f_concrete(tf.constant(1.)) f_concrete(x=tf.constant(1.)) ``` Nested structures containing Tensors may be specified when retrieving concrete functions. Structures with multiple Tensors are expanded into multiple arguments of the concrete function. Since multiple concrete function arguments are associated with one argument to the original function, these Tensors must be named explicitly. Tensors in nested structures may not be passed using positional arguments when calling the concrete function. ```python f_concrete2 = f.get_concrete_function( (tf.TensorSpec(None, tf.float64, name="first"), tf.TensorSpec([], tf.float32, name="second"))) # Keyword arguments are required when identifying Tensors in nested # structures. f_concrete2(first=tf.constant([1.]), second=tf.constant(0.)) ``` Functions with fixed input signatures have only one concrete function associated with them, which can be retrieved without specifying any arguments. As before Tensors must have unique names, either inferred from the argument names in the original Python function or specified explicitly. 
    ```python
    @tf.function(input_signature=(tf.TensorSpec(None, tf.float32),))
    def f_sig(y):
      return y

    f_sig_concrete = f_sig.get_concrete_function()
    f_sig_concrete(tf.constant(1.))
    f_sig_concrete(y=tf.constant(1.))
    ```

    Args:
      *args: inputs to specialize on.
      **kwargs: inputs to specialize on.

    Returns:
      A TensorFlow function which takes exactly one `tf.Tensor` per argument.

    Raises:
      ValueError: if this object has not yet been called on concrete values.
    """
    if self._stateful_fn is None:
      initializer_map = object_identity.ObjectIdentityDictionary()
      self._initialize(args, kwargs, add_initializers_to=initializer_map)
      self._initialize_uninitialized_variables(initializer_map)

    if self._created_variables:
      # In this case we have created variables on the first call, so we run
      # the defunned version which is guaranteed to never create variables.
      return self._stateless_fn.get_concrete_function(*args, **kwargs)
    elif self._stateful_fn is not None:
      # In this case we have not created variables on the first call. So we
      # can run the first trace but we should fail if variables are created.
      concrete = self._stateful_fn.get_concrete_function(*args, **kwargs)
      if self._created_variables:
        raise ValueError("Creating variables on a non-first call to a function"
                         " decorated with tf.function.")
      return concrete

  def __get__(self, instance, owner):
    """Makes it possible to defun instance methods."""
    del owner
    # `instance` here is the instance that this `Function` was accessed
    # through e.g., for
    #
    # class Foo(object):
    #
    #   @function.defun
    #   def bar(self):
    #     ...
    #
    # foo = Foo()
    # foo.bar()  # `foo.bar` is a `Function` instance
    #
    # then `instance` will be `foo` (and `owner` will be `Foo`). We create a
    # new instance of `Function` here to allow different instances each
    # to create variables once, thereby allowing methods to be decorated with
    # tf.function. Keeps a cache to avoid retracing the function every time
    # the descriptor is accessed.
    if instance not in self._descriptor_cache:
      if instance is None:
        return self
      self._descriptor_cache[instance] = (
          function_lib.class_method_to_instance_method(self, instance))
    return self._descriptor_cache[instance]


@tf_export("function")
def function(func=None,
             input_signature=None,
             autograph=True,
             experimental_autograph_options=None,
             experimental_relax_shapes=False,
             experimental_compile=None):
  """Creates a callable TensorFlow graph from a Python function.

  `function` constructs a callable that executes a TensorFlow graph
  (`tf.Graph`) created by tracing the TensorFlow operations in `func`.
  This allows the TensorFlow runtime to apply optimizations and exploit
  parallelism in the computation defined by `func`.

  _Example Usage_

  ```python
  def f(x, y):
    return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)

  g = tf.function(f)

  x = tf.constant([[2.0, 3.0]])
  y = tf.constant([[3.0, -2.0]])

  # `f` and `g` will return the same value, but `g` will be executed as a
  # TensorFlow graph.
  assert f(x, y).numpy() == g(x, y).numpy()

  # Tensors and tf.Variables used by the Python function are captured in the
  # graph.
  @tf.function
  def h():
    return f(x, y)

  assert (h().numpy() == f(x, y).numpy()).all()

  # Data-dependent control flow is also captured in the graph. Supported
  # control flow statements include `if`, `for`, `while`, `break`,
  # `continue`, `return`.
  @tf.function
  def g(x):
    if tf.reduce_sum(x) > 0:
      return x * x
    else:
      return -x // 2

  # print and TensorFlow side effects are supported, but exercise caution
  # when using Python side effects like mutating objects, saving to files,
  # etc.
l = [] @tf.function def g(x): for i in x: print(i) # Works tf.compat.v1.assign(v, i) # Works tf.compat.v1.py_func(lambda i: l.append(i))(i) # Works l.append(i) # Caution! Doesn't work. ``` Note that unlike other TensorFlow operations, we don't convert python numerical inputs to tensors. Moreover, a new graph is generated for each distinct python numerical value, for example calling `g(2)` and `g(3)` will generate two new graphs (while only one is generated if you call `g(tf.constant(2))` and `g(tf.constant(3))`). Therefore, python numerical inputs should be restricted to arguments that will have few distinct values, such as hyperparameters like the number of layers in a neural network. This allows TensorFlow to optimize each variant of the neural network. _Referencing `tf.Variable`s_ The Python function `func` may reference stateful objects (such as `tf.Variable`). These are captured as implicit inputs to the callable returned by `function`. For example: ```python c = tf.Variable(0) @tf.function def f(x): c.assign_add(1) return x + tf.compat.v1.to_float(c) assert int(c) == 0 assert f(1.0) == 2.0 assert int(c) == 1 assert f(1.0) == 3.0 assert int(c) == 2 ``` `function` can be applied to methods of an object. For example: ```python class Dense(object): def __init__(self): self.W = tf.Variable(tf.compat.v1.glorot_uniform_initializer()((10, 10))) self.b = tf.Variable(tf.zeros(10)) @tf.function def compute(self, x): return tf.matmul(x, self.W) + self.b d1 = Dense() d2 = Dense() x = tf.random.uniform((10, 10)) # d1 and d2 are using distinct variables assert not (d1.compute(x).numpy() == d2.compute(x).numpy()).all() ``` _Usage with `tf.keras`_ The `call` methods of a `tf.keras.Model` subclass can be decorated with `function` in order to apply graph execution optimizations on it. For example: ```python class MyModel(tf.keras.Model): def __init__(self, keep_probability=0.2): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4) self.dense2 = tf.keras.layers.Dense(5) self.keep_probability = keep_probability @tf.function def call(self, inputs, training=True): y = self.dense2(self.dense1(inputs)) if training: return tf.nn.dropout(y, self.keep_probability) else: return y model = MyModel() model(x, training=True) # executes a graph, with dropout model(x, training=False) # executes a graph, without dropout ``` _Input Signatures_ `function` instantiates a separate graph for every unique set of input shapes and datatypes. For example, the following code snippet will result in three distinct graphs being traced, as each input has a different shape. ```python @tf.function def f(x): return tf.add(x, 1.) scalar = tf.constant(1.0) vector = tf.constant([1.0, 1.0]) matrix = tf.constant([[3.0]]) f(scalar) f(vector) f(matrix) ``` An "input signature" can be optionally provided to `function` to control the graphs traced. The input signature specifies the shape and type of each `Tensor` argument to the function using a `tf.TensorSpec` object. For example, the following code snippet ensures that a single graph is created where the input `Tensor` is required to be a floating point tensor with no restrictions on shape. ```python @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)]) def f(x): return tf.add(x, 1.) ``` When an `input_signature` is specified, the callable will convert the inputs to the specified TensorSpecs. _Tracing and staging_ When `autograph` is `True`, all Python control flow that depends on `Tensor` values is staged into a TensorFlow graph. 
When `autograph` is `False`, the function is traced and control flow is not allowed to depend on data. Note that `function` only stages TensorFlow operations, all Python code that `func` executes and does not depend on data will shape the _construction_ of the graph. For example, consider the following: ```python import numpy as np def add_noise(): return tf.eye(5) + np.random.randn(5, 5) traced = tf.function(add_noise) ``` `add_noise()` will return a different output every time it is invoked. However, `traced()` will return the same value every time it is called, since a particular random value generated by the `np.random.randn` call will be inserted in the traced/staged TensorFlow graph as a constant. In this particular example, replacing `np.random.randn(5, 5)` with `tf.random.normal((5, 5))` will result in the same behavior for `add_noise()` and `traced()`. _Python Side-Effects_ A corollary of the previous discussion on tracing is the following: If a Python function `func` has Python side-effects, then executing `func` multiple times may not be semantically equivalent to executing `F = tf.function(func)` multiple times; this difference is due to the fact that `function` only captures the subgraph of TensorFlow operations that is constructed when `func` is invoked to trace a graph. The same is true if code with Python side effects is used inside control flow, such as a loop. If your code uses side effects that are not intended to control graph construction, wrap them inside `tf.compat.v1.py_func`. _Retracing_ A single tf.function object might need to map to multiple computation graphs under the hood. This should be visible only as performance (tracing graphs has a nonzero computational and memory cost) but should not affect the correctness of the program. A traced function should return the same result as it would when run eagerly, assuming no unintended Python side-effects. Calling a `tf.function` with tensor arguments of different dtypes should lead to at least one computational graph per distinct set of dtypes. Alternatively, always calling a `tf.function` with tensor arguments of the same shapes and dtypes and the same non-tensor arguments should not lead to additional retracings of your function. Other than that, TensorFlow reserves the right to retrace functions as many times as needed, to ensure that traced functions behave as they would when run eagerly and to provide the best end-to-end performance. For example, the behavior of how many traces TensorFlow will do when the function is repeatedly called with different python scalars as arguments is left undefined to allow for future optimizations. To control the tracing behavior, use the following tools: - different `tf.function` objects are guaranteed to not share traces; and - specifying a signature or using concrete function objects returned from get_concrete_function() guarantees that only one function graph will be built. Args: func: function to be compiled. If `func` is None, returns a decorator that can be invoked with a single argument - `func`. The end result is equivalent to providing all the arguments up front. In other words, `tf.function(input_signature=...)(func)` is equivalent to `tf.function(func, input_signature=...)`. The former can be used to decorate Python functions, for example: @tf.function(input_signature=...) def foo(...): ... input_signature: A possibly nested sequence of `tf.TensorSpec` objects specifying the shapes and dtypes of the Tensors that will be supplied to this function. 
      If `None`, a separate function is instantiated for each inferred input
      signature. If input_signature is specified, every input to `func` must
      be a `Tensor`, and `func` cannot accept `**kwargs`.
    autograph: Whether autograph should be applied on `func` before tracing a
      graph. This allows for dynamic control flow (Python if's, loops etc.)
      in the traced graph. See https://www.tensorflow.org/guide/autograph for
      more information.
    experimental_autograph_options: Experimental knobs (in the form of a
      tuple of tensorflow.autograph.Feature values) to control behavior when
      autograph=True.
    experimental_relax_shapes: When true, argument shapes may be relaxed to
      avoid unnecessary retracing.
    experimental_compile: If false, execute the function in a regular way.
      The function is optimized by some graph rewrite passes (some ops might
      be clustered into a single op) and interpreted by the standard
      TensorFlow executor, which dispatches op kernels one by one as they
      become executable. Set it to false when directly running a
      multi-device function on TPUs (e.g. two TPU cores, one TPU core and
      its host CPU). If True, the function is compiled directly by XLA
      (https://www.tensorflow.org/xla). XLA would fuse all the ops and emit
      more efficient code to run for some devices (e.g. TPU, XLA_GPU) and
      some use cases (e.g. dense tensor computation). It requires that the
      whole function is compilable by XLA (e.g. static tensor shape, a
      subset of operations, no string, compile-time constant input, etc).
      If None (default), compile the function with XLA when running on TPU
      and go through the regular function execution path when running on
      other devices. Note: TensorArrays on TPU don't work with standard
      TensorFlow executor.

  Returns:
    If `func` is not None, returns a callable that will execute the compiled
    function (and return zero or more `tf.Tensor` objects).
    If `func` is None, returns a decorator that, when invoked with a single
    `func` argument, returns a callable equivalent to the case above.

  Raises:
    TypeError: If `input_signature` is neither `None` nor a sequence of
      `TensorSpec` objects.
  """
  if input_signature is not None:
    function_lib.validate_signature(input_signature)

  def decorated(inner_function):
    try:
      name = inner_function.__name__
    except AttributeError:
      name = "function"
    return tf_decorator.make_decorator(
        inner_function,
        Function(
            inner_function,
            name,
            input_signature=input_signature,
            autograph=autograph,
            experimental_autograph_options=experimental_autograph_options,
            experimental_relax_shapes=experimental_relax_shapes,
            experimental_compile=experimental_compile))

  # This code path is for the `foo = tf.function(foo, ...)` use case
  if func is not None:
    return decorated(func)

  # This code path is for the
  #
  # @tf.function(...)
  # def foo(...):
  #   ...
  #
  # use case, which is equivalent to `foo = tf.function(...)(foo)`
  return decorated
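

if __name__ == "__main__":
  # Illustrative usage sketch for the `tf.function` decorator defined above.
  # This block is an example, not part of the library: it assumes the public
  # TensorFlow package is importable as `tf` and that eager execution is
  # enabled (the default in TF 2.x; in 1.x, call
  # tf.compat.v1.enable_eager_execution() first).
  import tensorflow as tf

  v = tf.Variable(1.0)  # Created once, outside the function, then captured.

  @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
  def scale(x):
    # Creating a new tf.Variable here on every call would trigger the
    # "Creating variables on a non-first call" ValueError documented above.
    return v * x

  # Because of the fixed input signature, a single concrete function serves
  # all float32 inputs; no retracing happens between these two calls.
  print(scale(tf.constant(3.0)))
  print(scale(tf.constant([1.0, 2.0])))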
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/def_function.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unidiomatic-typecheck """Prototype decorator for defining legacy-graph-mode functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import weakref from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import struct_pb2 from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.eager import lift_to_graph from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import func_graph from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import nested_structure_coder from tensorflow.python.training.tracking import data_structures from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util.tf_export import tf_export class VariableHolder(object): """Holds variables for a python function.""" def __init__(self, fn=None, share_variables=False): self._fn = fn self._share_variables = share_variables self._variables_by_name = data_structures.Mapping() @property def variables(self): return self._variables_by_name def variable_creator_scope(self, next_creator, **kwargs): """Creates variables & adds them to collections to match legacy code.""" collections = kwargs.pop("collections", None) v = None # Get expected variable name. 
with ops.name_scope(kwargs.get("name", None), "Variable") as name: variable_name = ops.name_from_scope_name(name) kwargs["name"] = name if self._share_variables: v = self._variables_by_name.get(variable_name, None) if v is None: v = next_creator(**kwargs) self._variables_by_name[variable_name] = v if collections is None: collections = [ops.GraphKeys.GLOBAL_VARIABLES] if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections: collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES] ops.add_to_collections(collections, v) return v def __call__(self, *args, **kwargs): return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs) def call_with_variable_creator_scope(self, fn): def wrapped(*args, **kwargs): with variable_scope.variable_creator_scope(self.variable_creator_scope): return fn(*args, **kwargs) return wrapped def _get_element_from_tensor_info(tensor_info, graph): """Simplified copy of the deprecated `get_tensor_from_tensor_info`.""" encoding = tensor_info.WhichOneof("encoding") if encoding == "name": # We may get operations here in some cases. TensorInfo is a bit of a # misnomer if so. return graph.as_graph_element(tensor_info.name) elif encoding == "coo_sparse": return sparse_tensor.SparseTensor( graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name), graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name), graph.get_tensor_by_name( tensor_info.coo_sparse.dense_shape_tensor_name)) elif encoding == "composite_tensor": struct_coder = nested_structure_coder.StructureCoder() spec_proto = struct_pb2.StructuredValue( type_spec_value=tensor_info.composite_tensor.type_spec) spec = struct_coder.decode_proto(spec_proto) components = [graph.get_tensor_by_name(component.name) for component in tensor_info.composite_tensor.components] return spec._from_components(components) # pylint: disable=protected-access else: raise ValueError("Invalid TensorInfo.encoding: %s" % encoding) def _lift_single_variable(old_variable, graph, variable_holder): """Lifts `old_variable` out of the `FuncGraph` `graph`.""" new_variable = resource_variable_ops.UninitializedVariable( shape=old_variable.shape, dtype=old_variable.dtype, name=old_variable.op.name, trainable=old_variable.trainable, extra_handle_data=old_variable.handle) new_variable._initializer_op = old_variable._initializer_op # pylint: disable=protected-access graph.add_capture(new_variable.handle, old_variable.handle) # Now that we've added the new variable to graph.captures, # graph.capture will use that cached value and do some post-processing # on the capture like recording it on the tape. graph.capture(new_variable.handle) # pylint: disable=protected-access variable_name = new_variable.name.split(":")[0] variable_holder._variables_by_name[variable_name] = new_variable graph._weak_variables.append(weakref.ref(new_variable)) # pylint: enable=protected-access graph.watch_variable(new_variable) return new_variable def _lift_unlifted_variables(graph, variable_holder): """Finds resource variables and lifts them into the outer context. When we import a GraphDef inside a wrap_function, no Python graph building code runs. This means we get VarHandleOps which create variable resources, but no corresponding Python objects. Leaving them like this works but gives the user no way to interact with or modify the variables outside the graph. This method searches for variables and lifts them out as regular variable objects when possible, indicating to the FuncGraph that they are captures. 
Args: graph: The FuncGraph to lift variables from. variable_holder: A VariableHolder to record the lifted variables in. """ with graph.as_default(): global_collection_variables = ops.get_collection( ops.GraphKeys.GLOBAL_VARIABLES) local_collection_variables = ops.get_collection( ops.GraphKeys.LOCAL_VARIABLES) existing_captures = object_identity.ObjectIdentitySet( graph.internal_captures) lifted_variables = object_identity.ObjectIdentityDictionary() def _should_lift_variable(v): return ((v._in_graph_mode # pylint: disable=protected-access and v.graph.building_function) and isinstance(v, resource_variable_ops.BaseResourceVariable) and v.handle not in existing_captures) for old_variable in global_collection_variables: if _should_lift_variable(old_variable): new_variable = _lift_single_variable( old_variable, graph, variable_holder) lifted_variables[old_variable] = new_variable existing_captures.add(old_variable.handle) for old_variable in local_collection_variables: if _should_lift_variable(old_variable): new_variable = _lift_single_variable( old_variable, graph, variable_holder) lifted_variables[old_variable] = new_variable existing_captures.add(old_variable.handle) if new_variable._in_graph_mode: # pylint: disable=protected-access outer_graph = new_variable.graph # Variables are added to the global collection by default. In this # case we only want the variable in the local collection, so we'll pop # it out. global_collection = outer_graph.get_collection_ref( ops.GraphKeys.GLOBAL_VARIABLES) global_collection.remove(new_variable) outer_graph.add_to_collection( ops.GraphKeys.LOCAL_VARIABLES, new_variable) # Update the FuncGraph's collections, partly for the user and partly so this # function is idempotent when it runs again in prune() calls. for collection_name in [ ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES ]: mutable_collection = ops.get_collection_ref(collection_name) for index, current in enumerate(mutable_collection): mutable_collection[index] = lifted_variables.get(current, current) if not resource_variable_ops.is_resource_variable( mutable_collection[index]): logging.warning( "Unable to create a python object for variable {} because it is " "a reference variable. It may not be visible to training APIs. " "If this is a problem, consider rebuilding the SavedModel after " "running tf.compat.v1.enable_resource_variables().".format( mutable_collection[index])) # TODO(allenl): make this trackable class WrappedFunction(function.ConcreteFunction): """Wraps a tf V1 piece of code in a function.""" def __init__(self, fn_graph, variable_holder, attrs=None, signature=None): self._variable_holder = variable_holder _lift_unlifted_variables(fn_graph, variable_holder) # We call __init__ after lifting variables so that the function's signature # properly reflects the new captured inputs. for f in fn_graph.as_graph_def().library.function: context.context().add_function_def(f) super(WrappedFunction, self).__init__( fn_graph, attrs=attrs, signature=signature) def prune(self, feeds, fetches, name=None, input_signature=None): """Extract a subgraph of this function's underlying graph. Wraps the subgraph in a new `WrappedFunction` object. Args: feeds: Input tensors to the subgraph to extract, as `Tensor` objects. fetches: Possibly-nested Python data structure containing information about outputs of the target subgraph. Each entry can either be a `Tensor` object (for data outputs), an `Operation` object (for control outputs), or a `TensorInfo` proto. 
Any additional shape/dtype information provided in a `TensorInfo` and not present in the original graph will be added to the returned subgraph. name: (optional) Name to give to the underlying `FuncGraph` of the returned object. If no name is provided, the graph's name will be `"pruned"`. input_signature: (optional) possibly-nested Python data structure containing `TensorSpec` objects, with which to populate the returned functions's `FuncGraph`'s `structured_input_signature` field. Returns: A new `WrappedFunction` object containing a copy of the portion of this object's graph that goes from `feeds` to `fetches`. """ # TODO(b/129646028): Add support for CompositeTensors. name = name or "pruned" flat_feeds = nest.flatten(feeds, expand_composites=True) flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds] for f in flat_feeds: if not isinstance(f, ops.Tensor): raise ValueError("Feeds must be tensors.") # Ignoring all feeds that are captures allows prune to be called # using wrapped_func.inputs even when it uses variables internal_captures = object_identity.ObjectIdentitySet( self.graph.internal_captures) flat_feeds = [f for f in flat_feeds if f not in internal_captures] operation_fetches = [] tensor_fetches = [] tensor_infos = [] def _fetch_preprocesing_callback(fetch): """Extract out lists of ops, tensors, and tensor type info. Turns TensorInfos into Tensors in the original `fetches` structure. Also extracts ops from `fetches`. Args: fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or string identifying a Tensor or Operation. Returns: `fetch` converted to a Tensor. """ if isinstance(fetch, ops.Operation): operation_fetches.append(fetch) return fetch elif isinstance(fetch, meta_graph_pb2.TensorInfo): tensor_infos.append(fetch) decoded = _get_element_from_tensor_info(fetch, self._func_graph) if (tensor_util.is_tensor(decoded) or isinstance(decoded, composite_tensor.CompositeTensor)): tensor_fetches.append(decoded) else: operation_fetches.append(decoded) return decoded elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)): tensor_fetches.append(fetch) return fetch else: graph_element = self.graph.as_graph_element(fetch) return _fetch_preprocesing_callback(graph_element) fetches = nest.map_structure(_fetch_preprocesing_callback, fetches) # Expand composite tensors into their component dense Tensors. tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True) for f in (flat_feeds + tensor_fetches + operation_fetches): if f.graph is not self._func_graph: raise ValueError("Can only prune function whose feeds and fetches " "are from this graph (%s). Input %s is from graph %s" % (self._func_graph, f, f.graph)) with self._func_graph.as_default(): pruned_graph = func_graph.FuncGraph(name) lift_map = lift_to_graph.lift_to_graph( operation_fetches + tensor_fetches, pruned_graph, sources=flat_feeds + self.graph.internal_captures) # Note that we add the component tensors of any composite tensors to the # returned function's outputs list; the list must contain these component # tensors, or the function's sparse outputs won't work properly. 
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches) pruned_graph.control_outputs.extend( [lift_map[operation] for operation in operation_fetches]) pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds) for external_capture, internal_capture in self.graph.captures: pruned_graph.add_capture(external_capture, lift_map[internal_capture]) for ti in tensor_infos: if ti.WhichOneof("encoding") == "name": # Dense tensors only t = pruned_graph.as_graph_element(ti.name) if tensor_util.is_tensor(t): t.set_shape(tensor_shape.TensorShape(ti.tensor_shape)) # pylint: disable=protected-access for f in self.graph._functions.values(): pruned_graph._add_function(f) # pylint: enable=protected-access pruned_graph.variables = self.graph.variables def _structured_output_mapping(fetched): """callback for `nest.map_structure()`""" lifted = lift_map[fetched] if isinstance(lifted, ops.Operation): return None return lifted # expand_composites=True here causes composite tensors to be expanded # into their component dense Tensors, mapped to the new graph, and then # reconstituted into their original composite form. pruned_graph.structured_outputs = nest.map_structure( _structured_output_mapping, fetches, expand_composites=True) pruned_graph.structured_input_signature = input_signature pruned_fn = WrappedFunction( pruned_graph, variable_holder=self._variable_holder) pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access # TODO(kathywu): Enable keyword arguments if an input signature is specified pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access return pruned_fn def _filter_returned_ops(fn): """Filtering out any ops returned by function. Args: fn: a function Returns: A tuple of ( Wrapped function that returns `None` in place of any ops, dict that maps the index in the flat output structure to the returned op ) """ returned_ops = {} def wrap_and_filter_returned_ops(*args, **kwargs): outputs = fn(*args, **kwargs) flat_outputs = nest.flatten(outputs) for n in range(len(flat_outputs)): output = flat_outputs[n] if isinstance(output, ops.Operation): returned_ops[n] = output flat_outputs[n] = None return nest.pack_sequence_as(outputs, flat_outputs) return wrap_and_filter_returned_ops, returned_ops class WrappedGraph(object): """Class for wrapping multiple TF 1.X functions in a single graph. Maintains a dictionary mapping names to wrapped functions. See `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions. Functions wrapped using this class have access to variables and collections created in other wrapped functions, using the standard TF 1.X API ( `tf.compat.v1.get_variable` or `tf.compat.v1.get_default_graph().get_collection(...)`) Outside a function, variables and collections may be accessed using the `variables` and `graph` properties. 
Example: ``` def add_v1(x): with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) return v + x def increment_var_v1(x): with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE): v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32) return v.assign_add(x) g = WrappedGraph() add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)]) increment_var = g.wrap_function(increment_var_v1, [tf.TensorSpec([], tf.int32)]) assert len(g.variables) == 1 assert g.variables[0].numpy() == 0 increment_var(tf.constant(5)) assert g.variables[0].numpy() == 5 ``` """ def __init__(self, variable_holder=None, **kwargs): self._variable_holder = ( variable_holder or VariableHolder(share_variables=True)) name = kwargs.pop("name", "wrapped_function_graph") # Always start with empty collections, unless otherwise specified. Setting # `collections=None` will copy the collections from the outer graph. collections = kwargs.pop("collections", {}) self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs) self._wrapped_function = WrappedFunction(self.graph, self._variable_holder) self._functions = {} @property def functions(self): return self._functions @property def variables(self): return self._variable_holder.variables def wrap_function(self, fn, signature, name=None): """Wraps a TF 1.X function and returns an eager-compatible function. All functions wrapped in the same `WrappedGraph` will have access to the same graph (`tf.compat.v1.get_default_graph` to get the graph object within a function, or `WrappedGraph.graph` to get the graph outside a function). Variables created within the function will be added to the `variables` list. Function inputs: All inputs to the function must be tensors (nested ok), with their shapes and dtypes defined in the `signature` argument. Function outputs: * The 1.X function may return tensors, variables, and ops. The wrapped eager-compatible function will always return tensors in the same nested structure. * Variables are replaced with a tensor containing the latest read values. * Returned ops are executed, and replaced with None. * The order of op execution and variable reads in the return is nondeterministic. For example: ``` def update_var(x): v = tf.Variable(0) op = tf.compat.v1.assign(v, x).op return v, op g = WrappedGraph() fn = g.wrap_function(update_var) read_value, _ = fn(tf.constant(3)) print(read_value.numpy()) # could be 0 or 3 print(g.variables[0].numpy()) # always 3 ``` To ensure that ops in the function are executed (e.g. ops added to the `tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns. Args: fn: a 1.X tensorflow function. signature: a possibly nested sequence of `TensorSpecs` specifying the shapes and dtypes of the arguments. name: an optional string name for the function. The function will be saved with key `name` in the `functions` dictionary. Returns: An eager-compatible function. """ return self._wrap_function(fn, signature=signature, name=name) def _wrap_function(self, fn, args=None, kwargs=None, signature=None, name=None): """Internal wrap function method with extended func_graph arguments.""" fn_with_filter_and_scope, returned_ops = _filter_returned_ops( self._variable_holder.call_with_variable_creator_scope(fn)) func_graph.func_graph_from_py_func( None, # Name is unused. 
        fn_with_filter_and_scope,
        args=args,
        kwargs=kwargs,
        signature=signature,
        add_control_dependencies=False,
        func_graph=self.graph)

    # This code relies on questionable behavior from
    # `func_graph_from_py_func`. If an existing FuncGraph is passed into the
    # `func_graph` arg, the inputs and structured outputs are overwritten.
    # Pretty sure this is a bug, because structured outputs don't match up
    # with the outputs...
    fn_inputs = self.graph.inputs[:-len(self.graph.captures)]

    # Return filtered ops to the flattened outputs.
    flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
    for index, op in returned_ops.items():
      flat_fn_outputs[index] = op
    fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
                                       flat_fn_outputs)

    name = name or fn.__name__
    wrapped_function = self._wrapped_function.prune(
        fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
    self._functions[name] = wrapped_function
    return wrapped_function


@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
  """Wraps the TF 1.x function fn into a graph function.

  The python function `fn` will be called once with symbolic arguments
  specified in the `signature`, traced, and turned into a graph function.
  Any variables created by `fn` will be owned by the object returned by
  `wrap_function`. The resulting graph function can be called with tensors
  which match the signature.

  ```python
  def f(x, do_add):
    v = tf.Variable(5.0)
    if do_add:
      op = v.assign_add(x)
    else:
      op = v.assign_sub(x)
    with tf.control_dependencies([op]):
      return v.read_value()

  f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])

  assert float(f_add(1.0)) == 6.0
  assert float(f_add(1.0)) == 7.0

  # Can call tf.compat.v1.wrap_function again to get a new trace, a new set
  # of variables, and possibly different non-template arguments.
  f_sub = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])

  assert float(f_sub(1.0)) == 4.0
  assert float(f_sub(1.0)) == 3.0
  ```

  Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
  TensorFlow graph. But while `tf.function` runs all stateful operations
  (e.g. `tf.print`) and sequences operations to provide the same semantics as
  eager execution, `wrap_function` is closer to the behavior of `session.run`
  in TensorFlow 1.x. It will not run any operations unless they are required
  to compute the function's outputs, either through a data dependency or a
  control dependency. Nor will it sequence operations.

  Unlike `tf.function`, `wrap_function` will only trace the Python function
  once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
  `wrap_function`'s `signature` argument.

  Since it is only traced once, variables and state may be created inside the
  function and owned by the function wrapper object.

  Args:
    fn: python function to be wrapped
    signature: the placeholder and python arguments to be passed to the
      wrapped function
    name: Optional. The name of the function.

  Returns:
    the wrapped graph function.
  """
  holder = VariableHolder(fn)
  func_graph_name = "wrapped_function"
  if name is not None:
    func_graph_name = "wrapped_function_" + name
  return WrappedFunction(
      func_graph.func_graph_from_py_func(
          func_graph_name,
          holder,
          args=None,
          kwargs=None,
          signature=signature,
          add_control_dependencies=False,
          collections={}),
      variable_holder=holder,
      signature=signature)


def function_from_graph_def(graph_def, inputs, outputs):
  """Creates a ConcreteFunction from a GraphDef.

  Args:
    graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which should be inputs to the function. outputs: A Tensor name or nested structure of names in `graph_def` which should be outputs of the function. Returns: A ConcreteFunction. """ def _imports_graph_def(): importer.import_graph_def(graph_def, name="") wrapped_import = wrap_function(_imports_graph_def, []) import_graph = wrapped_import.graph return wrapped_import.prune( nest.map_structure(import_graph.as_graph_element, inputs), nest.map_structure(import_graph.as_graph_element, outputs))
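

if __name__ == "__main__":
  # Illustrative sketch of `wrap_function` (an example, not part of the
  # module), assuming the public API is importable as `tf`. It mirrors the
  # docstring above: `f` is traced exactly once, and the returned
  # WrappedFunction owns the variable created during that trace.
  import tensorflow as tf

  def f(x):
    v = tf.Variable(5.0)  # Owned by the wrapper; created during the one trace.
    op = v.assign_add(x)
    with tf.control_dependencies([op]):
      return v.read_value()

  f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32)])
  assert float(f_add(tf.constant(1.0))) == 6.0
  # Unlike `tf.function`, there is no retracing: state persists across calls.
  assert float(f_add(tf.constant(1.0))) == 7.0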
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/wrap_function.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow 2.0 Profiler for both Eager Mode and Graph Mode.

The profiler has two modes:
- Programmatic Mode: start(), stop() and the Profiler class. Profiling starts
  when start() is called (or a Profiler object is created) and stops when
  stop() is called (or the Profiler object is destroyed).
- On-demand Mode: start_profiler_server(). The server performs profiling when
  it receives a profiling request.

NOTE: Only one active profiler session is allowed. Use of simultaneous
Programmatic Mode and On-demand Mode is undefined and will likely fail.

NOTE: The Keras TensorBoard callback will automatically perform sampled
profiling. Before enabling customized profiling, set the callback flag
"profile_batches=[]" to disable automatic sampled profiling.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import datetime
import os
import threading

from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import c_api_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat

_profiler = None
_profiler_lock = threading.Lock()
_run_num = 0
# This suffix should be kept in sync with kProfileEmptySuffix in
# tensorflow/core/profiler/rpc/client/capture_profile.cc.
_EVENT_FILE_SUFFIX = '.profile-empty'


class ProfilerAlreadyRunningError(Exception):
  pass


class ProfilerNotRunningError(Exception):
  pass


def start():
  """Start profiling.

  Raises:
    ProfilerAlreadyRunningError: If another profiling session is running.
  """
  global _profiler
  with _profiler_lock:
    if _profiler is not None:
      raise ProfilerAlreadyRunningError('Another profiler is running.')
    if context.default_execution_mode == context.EAGER_MODE:
      context.ensure_initialized()
    _profiler = pywrap_tensorflow.TFE_NewProfiler()
    if not pywrap_tensorflow.TFE_ProfilerIsOk(_profiler):
      logging.warning('Another profiler session is running which is probably '
                      'created by profiler server. Please avoid using profiler '
                      'server and profiler APIs at the same time.')


def stop():
  """Stop current profiling session and return its result.

  Returns:
    A binary string of tensorflow.tpu.Trace. User can write the string to a
    file for offline analysis by TensorBoard.

  Raises:
    ProfilerNotRunningError: If there is no active profiling session.
  """
  global _profiler
  global _run_num
  with _profiler_lock:
    if _profiler is None:
      raise ProfilerNotRunningError(
          'Cannot stop profiling. No profiler is running.')
    if context.default_execution_mode == context.EAGER_MODE:
      context.context().executor.wait()
    with c_api_util.tf_buffer() as buffer_:
      pywrap_tensorflow.TFE_ProfilerSerializeToString(
          _profiler,
          buffer_)
      result = pywrap_tensorflow.TF_GetBuffer(buffer_)
    pywrap_tensorflow.TFE_DeleteProfiler(_profiler)
    _profiler = None
    _run_num += 1
  return result


def maybe_create_event_file(logdir):
  """Create an empty event file if one does not already exist.

  This event file indicates that we have a plugins/profile/ directory in the
  current logdir.

  Args:
    logdir: log directory.
  """
  for file_name in gfile.ListDirectory(logdir):
    if file_name.endswith(_EVENT_FILE_SUFFIX):
      return
  # TODO(b/127330388): Use summary_ops_v2.create_file_writer instead.
  event_writer = pywrap_tensorflow.EventsWriter(
      compat.as_bytes(os.path.join(logdir, 'events')))
  event_writer.InitWithSuffix(compat.as_bytes(_EVENT_FILE_SUFFIX))


def save(logdir, result):
  """Save profile result to TensorBoard logdir.

  Args:
    logdir: log directory read by TensorBoard.
    result: profiling result returned by stop().
  """
  plugin_dir = os.path.join(
      logdir, 'plugins', 'profile',
      datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
  gfile.MakeDirs(plugin_dir)
  maybe_create_event_file(logdir)
  with gfile.Open(os.path.join(plugin_dir, 'local.trace'), 'wb') as f:
    f.write(result)


def start_profiler_server(port):
  """Start a profiler grpc server that listens to given port.

  The profiler server will keep the program running even after the training
  finishes. Please shut down the server with CTRL-C. It can be used in both
  eager mode and graph mode. The service is defined in
  tensorflow/core/profiler/profiler_service.proto. Please use
  tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture a traceable
  file following
  https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace

  Args:
    port: port profiler server listens to.
  """
  if context.default_execution_mode == context.EAGER_MODE:
    context.ensure_initialized()
  pywrap_tensorflow.TFE_StartProfilerServer(port)


class Profiler(object):
  """Context-manager eager profiler api.

  Example usage:
  ```python
  with Profiler("/path/to/logdir"):
    # do some work
  ```
  """

  def __init__(self, logdir):
    self._logdir = logdir

  def __enter__(self):
    start()

  def __exit__(self, typ, value, tb):
    result = stop()
    save(self._logdir, result)
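

if __name__ == "__main__":
  # Illustrative sketch of the programmatic profiling mode above (an example,
  # not part of the module). The logdir used here is a hypothetical
  # placeholder; point it at the directory your TensorBoard instance reads.
  import tempfile

  logdir = tempfile.mkdtemp()

  # Explicit start/stop: serialize the trace and save it for TensorBoard.
  start()
  # ... run the TensorFlow code to be profiled ...
  result = stop()
  save(logdir, result)

  # Equivalent context-manager form using the Profiler class.
  with Profiler(logdir):
    pass  # ... run the TensorFlow code to be profiled ...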
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/profiler.py
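# --- Editor's note: illustrative sketch, not part of the TensorFlow sources ---
# A minimal end-to-end use of the programmatic profiling API defined in
# profiler.py above. The logdir path is hypothetical; in TF 1.x, eager
# execution must be enabled first.
import tensorflow as tf
from tensorflow.python.eager import profiler

tf.compat.v1.enable_eager_execution()

profiler.start()                             # begin a profiling session
x = tf.random.uniform([128, 128])
y = tf.matmul(x, x)                          # work to be profiled
trace = profiler.stop()                      # serialized trace bytes
profiler.save('/tmp/profile_logdir', trace)  # readable by TensorBoard

# The Profiler class bundles the same start()/stop()/save() sequence:
with profiler.Profiler('/tmp/profile_logdir'):
    y = tf.matmul(x, x)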
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import wrap_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import importer as graph_def_importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.ops.ragged import ragged_factory_ops from tensorflow.python.ops.ragged import ragged_tensor from tensorflow.python.platform import test from tensorflow.python.training import saver as saver_lib class WrapFunctionTest(test.TestCase): def testDocString(self): def f(x, do_add): v = variables.Variable(5.0) if do_add: op = v.assign_add(x) else: op = v.assign_sub(x) with ops.control_dependencies([op]): return v.read_value() f_add = wrap_function.wrap_function( f, [tensor_spec.TensorSpec((), dtypes.float32), True]) self.assertAllEqual(f_add(1.0), 6.0) self.assertAllEqual(f_add(1.0), 7.0) # Can call tf.compat.v1.wrap_function again to get a new trace, a new set # of variables, and possibly different non-template arguments. f_sub = wrap_function.wrap_function( f, [tensor_spec.TensorSpec((), dtypes.float32), False]) self.assertAllEqual(f_sub(1.0), 4.0) self.assertAllEqual(f_sub(1.0), 3.0) def testPrune(self): x_in = [] x_out = [] def f(x, y): x_in.append(x) xx = x * x x_out.append(xx) return xx, 2 * y*y f_wrapped = wrap_function.wrap_function( f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2) f_pruned = f_wrapped.prune(x_in[0], [x_out[0]]) self.assertAllEqual(f_pruned(ops.convert_to_tensor(2.0)), [4.0]) def testPruneRagged(self): x_in = [] x_out = [] def f(x, y): x_in.append(x) xx = x * x x_out.append(xx) return xx, y * y x_spec = ragged_tensor.RaggedTensorSpec([None, None], dtypes.float32) y_spec = tensor_spec.TensorSpec((), dtypes.float32) f_wrapped = wrap_function.wrap_function(f, [x_spec, y_spec]) f_pruned = f_wrapped.prune(x_in[0], x_out[0]) rt = ragged_factory_ops.constant([[1.0, 2.0], [3.0]]) expected = ragged_factory_ops.constant_value([[1.0, 4.0], [9.0]]) # Note: when we call f_pruned, we must pass the RaggedTensor in using # its components, since that's the current convention for how concrete # functions handle structured inputs. 
self.assertAllEqual(f_pruned(rt.values, rt.row_splits), expected) def _assert_single_captured_variable_argument(self, graph_def): # The single FunctionDef should have one argument, a captured variable function_def, = graph_def.library.function self.assertLen(function_def.signature.input_arg, 1) function_arg, = function_def.signature.input_arg self.assertEqual(dtypes.resource, dtypes.as_dtype(function_arg.type)) def testVariableLifting(self): save_prefix = os.path.join(self.get_temp_dir(), 'meta_graph_test') export_graph = ops.Graph() with export_graph.as_default(): v = variables.Variable(1.) array_ops.identity(v + 1., name='output') saver = saver_lib.Saver([v]) with self.test_session() as session: session.run(v.initializer) saver.save(session, save_prefix) def importer(): saver_lib.import_meta_graph(save_prefix + '.meta') return ops.get_default_graph().as_graph_element('output:0') wrapped = wrap_function.wrap_function(importer, []) lifted_variables = list(wrapped.graph.variables) self.assertLen(lifted_variables, 1) initializer = wrapped.prune( [], wrapped.graph.as_graph_element(v.initializer.name)) self.assertEqual(lifted_variables, list(initializer.graph.variables)) self.assertEqual(initializer.graph.external_captures, wrapped.graph.external_captures) @def_function.function def wraps_initializer(): initializer() wraps_initializer() self.assertEqual(1., lifted_variables[0].numpy()) wrapped_initializer_graphdef = ( wraps_initializer.get_concrete_function().graph.as_graph_def()) self._assert_single_captured_variable_argument(wrapped_initializer_graphdef) @def_function.function def wraps_wrapped(): return wrapped() # Verify that the original graph also has the correct signature. wrapped_wrapped_graphdef = ( wraps_wrapped.get_concrete_function().graph.as_graph_def()) self._assert_single_captured_variable_argument(wrapped_wrapped_graphdef) # Now check that the graph runs wrapped, from eager, and when pruned. self.assertAllEqual(wraps_wrapped().numpy(), lifted_variables[0].numpy() + 1.) self.assertAllEqual(wrapped().numpy(), lifted_variables[0].numpy() + 1.) pruned = wrapped.prune([], wrapped.graph.as_graph_element('output:0')) self.assertAllEqual(wrapped().numpy(), pruned().numpy()) def testNoArguments(self): def f(): return constant_op.constant(1.) f_wrapped = wrap_function.wrap_function(f, []) self.assertAllEqual(1.0, f_wrapped()) def testPruneCaptures(self): v1 = variables.Variable(2.) def f(): v2 = variables.Variable(3.) return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch') f_wrapped = wrap_function.wrap_function(f, []) self.assertAllEqual(6.0, f_wrapped()) # Test pruning directly on the inputs pruned = f_wrapped.prune( feeds=f_wrapped.inputs, fetches=f_wrapped.graph.get_tensor_by_name('fetch:0')) self.assertAllEqual(6.0, pruned()) # Test pruning with no inputs pruned = f_wrapped.prune( feeds=(), fetches=f_wrapped.graph.get_tensor_by_name('fetch:0')) self.assertAllEqual(6.0, pruned()) def testCollectionsIsolation(self): v1 = variables.Variable(2.) v2_holder = [] def f(): v2 = variables.Variable(3.) 
v2_holder.append(v2) ops.add_to_collection(ops.GraphKeys.LOSSES, v2 * constant_op.constant(3.)) return array_ops.identity(v1 * v2 * constant_op.constant(1.), 'fetch') f_wrapped = wrap_function.wrap_function(f, []) self.assertAllEqual(6.0, f_wrapped()) self.assertEqual( len(f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1) f_var_collection = f_wrapped.graph.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual(len(f_var_collection), 1) self.assertIs(f_var_collection[0], v2_holder[0]) v3_holder = [] def g(): v3 = variables.Variable(4.) v3_holder.append(v3) ops.add_to_collection(ops.GraphKeys.LOSSES, v3 * constant_op.constant(3.)) return array_ops.identity(v1 * v3 * constant_op.constant(1.), 'fetch') g_wrapped = wrap_function.wrap_function(g, []) self.assertAllEqual(8.0, g_wrapped()) self.assertEqual( len(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)), 1) g_var_collection = g_wrapped.graph.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) self.assertEqual(len(g_var_collection), 1) self.assertIs(g_var_collection[0], v3_holder[0]) # Both have only one value, and their values aren't equal. So no sharing. self.assertIsNot(g_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)[0], f_wrapped.graph.get_collection(ops.GraphKeys.LOSSES)[0]) def testGradientsOfPrune(self): v1 = variables.Variable(2.) v2_holder = [] def f(z): v2 = variables.Variable(3.) v2_holder.append(v2) return array_ops.identity(v1 * v2 * z, 'fetch') f_wrapped = wrap_function.wrap_function( f, [tensor_spec.TensorSpec((), dtype=dtypes.float32)]) x = constant_op.constant(1.) with backprop.GradientTape() as tape: tape.watch(x) out = f_wrapped(x) grads = tape.gradient(out, [x, v1, v2_holder[0]]) self.assertAllEqual(6.0, out) self.assertAllEqual([6.0, 3.0, 2.0], grads) pruned = f_wrapped.prune( feeds=f_wrapped.inputs, fetches=f_wrapped.graph.get_tensor_by_name('fetch:0')) x = constant_op.constant(1.) 
with backprop.GradientTape() as tape: tape.watch(x) out = pruned(x) grads = tape.gradient(out, [x, v1, v2_holder[0]]) self.assertAllEqual(6.0, out) self.assertAllEqual([6.0, 3.0, 2.0], grads) def testPruneOperations(self): v = variables.Variable(0) def f(): v.assign_add(1, name='increment', read_value=False) f_wrapped = wrap_function.wrap_function(f, []) pruned = f_wrapped.prune( feeds=(), fetches=(f_wrapped.graph.get_operation_by_name('increment'),)) self.assertEqual((None,), pruned()) self.assertEqual(1, self.evaluate(v)) del f, f_wrapped def f1(): v.assign_add( array_ops.placeholder(shape=[], dtype=dtypes.int32, name='step'), name='increment', read_value=False) return constant_op.constant(1, name='other') f_wrapped = wrap_function.wrap_function(f1, []) increments = f_wrapped.prune( feeds=(f_wrapped.graph.get_tensor_by_name('step:0')), fetches=(f_wrapped.graph.get_operation_by_name('increment'), f_wrapped.graph.get_tensor_by_name('other:0'))) first_output, second_output = increments(constant_op.constant(2)) self.assertEqual(['step:0', 'increment/resource:0'], [t.name for t in increments.inputs]) self.assertIs(None, first_output) self.assertEqual(1, second_output.numpy()) self.assertEqual(3, v.numpy()) does_not_increment = f_wrapped.prune( feeds=(f_wrapped.graph.get_tensor_by_name('step:0')), fetches=f_wrapped.graph.get_tensor_by_name('other:0')) self.assertEqual(1, does_not_increment(constant_op.constant(3)).numpy()) self.assertEqual(3, v.numpy()) def testPruneStatefulOpsFromWrappedFunc(self): v0 = variables.Variable(0) v1 = variables.Variable(0) # When we wrap a function, we expect it to be executed with 'tf.Graph` # rules: it's allowed to prune all ops that are not in transitive fanin of # the fetches. def f(x): v0.assign_add(1, name='increment_v0') v1.assign_add(1, name='increment_v1') return x f_wrapped = wrap_function.wrap_function(f, [1]) self.assertEqual(1, f_wrapped().numpy()) self.assertEqual(0, v0.numpy()) self.assertEqual(0, v1.numpy()) f_wrapped_with_name = wrap_function.wrap_function(f, [2], name='func') self.assertEqual(2, f_wrapped_with_name().numpy()) self.assertEqual(0, v0.numpy()) self.assertEqual(0, v1.numpy()) def test_operation_returned(self): v = variables.Variable(0) def f(): v.assign(1, read_value=False, name='assign_to_v') f_wrapped = wrap_function.wrap_function(f, []) operation_to_fetch = f_wrapped.graph.get_operation_by_name('assign_to_v') f_pruned = f_wrapped.prune( [], operation_to_fetch) self.assertEqual( ['assign_to_v'], [operation.name for operation in f_pruned.graph.control_outputs]) self.assertEqual(0, v.numpy()) f_pruned() self.assertEqual(1, v.numpy()) f_wrapped.prune([], 'assign_to_v')() f_wrapped.prune([], meta_graph_pb2.TensorInfo(name='assign_to_v'))() def test_function_from_graph_def(self): @def_function.function def make_graph_def(x): return x + 1. 
original_func_graph = make_graph_def.get_concrete_function( tensor_spec.TensorSpec([None, 2], dtypes.float32)).graph graph_def = original_func_graph.as_graph_def() revived_function = wrap_function.function_from_graph_def( graph_def, inputs=original_func_graph.inputs[0].name, outputs=original_func_graph.outputs[0].name) self.assertEqual(2., revived_function(constant_op.constant(1.)).numpy()) def test_create_variables_with_same_name(self): def f(): v1 = variables.Variable(0, name='v') v2 = variables.Variable(1, name='v') return v1, v2 f_wrapped = wrap_function.wrap_function(f, []) self.assertDictEqual( {'v:0': 0, 'v_1:0': 1}, # assert that variable names are uniquified {v.name: v.numpy() for v in f_wrapped._variable_holder.variables.values()}) # Uniquification should reset in separate calls to wrap_function. def f2(): v1 = variables.Variable(3, name='v') v2 = variables.Variable(4, name='v') return v1, v2 f_wrapped_2 = wrap_function.wrap_function(f2, []) self.assertDictEqual( {'v:0': 3, 'v_1:0': 4}, {v.name: v.numpy() for v in f_wrapped_2._variable_holder.variables.values()}) class WrappedGraphTest(test.TestCase): def testAddFunction(self): def fn(x): v = variables.Variable(3, name='v') v2 = variable_scope.get_variable( 'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32) return v + v2 + x with self.cached_session() as sess: result = fn(constant_op.constant(5)) sess.run(variables.global_variables_initializer()) expected = sess.run(result) g = wrap_function.WrappedGraph() signature = [tensor_spec.TensorSpec([], dtypes.int32)] wrapped_fn = g.wrap_function(fn, signature) self.assertEqual(expected, wrapped_fn(constant_op.constant(5)).numpy()) def testCollections(self): def fn(x): v = variables.VariableV1(3, name='v', trainable=False, collections=['a']) v2 = variable_scope.get_variable( 'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32, collections=['a', 'b']) return v + v2 + x def assert_collections(graph): self.assertLen(graph.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES), 1) self.assertLen(graph.get_collection('a'), 2) self.assertLen(graph.get_collection('b'), 1) g = wrap_function.WrappedGraph() g.wrap_function(fn, [tensor_spec.TensorSpec([], dtypes.int32)]) assert_collections(g.graph) def assert_fn(): assert_collections(ops.get_default_graph()) return 1 # Return is required # Assert that collections are accessible within a wrapped function. 
g.wrap_function(assert_fn, []) def testShareVariablesSameGraph(self): def add_v1(x): with variable_scope.variable_scope( 'reuse', reuse=variable_scope.AUTO_REUSE): v = variable_scope.get_variable( 'v', initializer=init_ops.Constant(3), shape=[], dtype=dtypes.int32) return v + x def subtract_v1(x): with variable_scope.variable_scope( 'reuse', reuse=variable_scope.AUTO_REUSE): v = variable_scope.get_variable( 'v', initializer=init_ops.Constant(4), shape=[], dtype=dtypes.int32) return v - x def different_variable_fn_v1(x): with variable_scope.variable_scope( 'no_reuse', reuse=variable_scope.AUTO_REUSE): v = variable_scope.get_variable( 'v', initializer=init_ops.Constant(5), shape=[], dtype=dtypes.int32) return v * x def increment_variable_v1(x): with variable_scope.variable_scope( 'reuse', reuse=variable_scope.AUTO_REUSE): v = variable_scope.get_variable( 'v', initializer=init_ops.Constant(6), shape=[], dtype=dtypes.int32) return v.assign_add(x) g = wrap_function.WrappedGraph() signature = [tensor_spec.TensorSpec([], dtypes.int32)] add = g.wrap_function(add_v1, signature) subtract = g.wrap_function(subtract_v1, signature) different_variable_fn = g.wrap_function(different_variable_fn_v1, signature) increment_variable = g.wrap_function(increment_variable_v1, signature) self.assertEqual(10, add(constant_op.constant(7)).numpy()) self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy()) # The shared variable has a starting value of 3 because add_v1 was wrapped # first. self.assertEqual(-4, subtract(constant_op.constant(7)).numpy()) self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy()) # Check that variable updates self.assertEqual(17, add(constant_op.constant(7)).numpy()) self.assertEqual(3, subtract(constant_op.constant(7)).numpy()) # Sanity check - result from this function shouldn't change. self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy()) self.assertAllEqual({'reuse/v', 'no_reuse/v'}, set(g.variables.keys())) def testShareVariablesDifferentGraphs(self): def add_v1(x): v = variables.Variable(3, name='v') return v + x def subtract_v1(x): v = variables.Variable(4, name='v') return v - x def different_variable_fn_v1(x): with ops.name_scope('different_scope'): v = variables.Variable(5, name='v') return v * x def increment_variable_v1(x): v = variables.Variable(6, name='v') return v.assign_add(x) signature = [tensor_spec.TensorSpec([], dtypes.int32)] vh = wrap_function.VariableHolder(share_variables=True) new_graph = lambda: wrap_function.WrappedGraph(variable_holder=vh) add = new_graph().wrap_function(add_v1, signature) subtract = new_graph().wrap_function(subtract_v1, signature) different_variable_fn = new_graph().wrap_function( different_variable_fn_v1, signature) increment_variable = new_graph().wrap_function( increment_variable_v1, signature) self.assertEqual(10, add(constant_op.constant(7)).numpy()) self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy()) # Because the variable in add_v1 was created first, its starting value is 3 # instead of the values defined in subtract_v1 or increment_variable_v1. self.assertEqual(-4, subtract(constant_op.constant(7)).numpy()) self.assertEqual(10, increment_variable(constant_op.constant(7)).numpy()) # Check that variable updates self.assertEqual(17, add(constant_op.constant(7)).numpy()) self.assertEqual(3, subtract(constant_op.constant(7)).numpy()) # Sanity check - result from this function shouldn't change. 
self.assertEqual(35, different_variable_fn(constant_op.constant(7)).numpy()) self.assertAllEqual({'v', 'different_scope/v'}, set(vh.variables.keys())) @test_util.run_in_graph_and_eager_modes def testImportedFunctionsRegistered(self): if test_util.is_gpu_available(): self.skipTest('not a GPU test') with ops.Graph().as_default() as graph: x = array_ops.placeholder(dtypes.variant, shape=[], name='foo') ds = dataset_ops.from_variant(x, structure=( tensor_spec.TensorSpec([], dtypes.int32))) y = ds.reduce(array_ops.zeros([], dtype=dtypes.int32), lambda p, q: p + q) graph_def = graph.as_graph_def() def fn_to_wrap(a): returned_elements = graph_def_importer.import_graph_def( graph_def, input_map={x.name: a}, return_elements=[y.name]) return returned_elements[0] wrapped_fn = wrap_function.wrap_function( fn_to_wrap, [tensor_spec.TensorSpec((), dtypes.variant)]) ds = dataset_ops.Dataset.from_tensor_slices([10, 20]) v = dataset_ops.to_variant(ds) self.evaluate(wrapped_fn(v)) def testReturnOp(self): def update_var_v1(x): v = variables.Variable(3, name='v') update_op = state_ops.assign(v, x).op return update_op g = wrap_function.WrappedGraph() signature = [tensor_spec.TensorSpec([], dtypes.int32)] update_var = g.wrap_function(update_var_v1, signature) self.assertEqual(g.variables['v'].numpy(), 3) update_var(constant_op.constant(12)) self.assertEqual(g.variables['v'].numpy(), 12) if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/wrap_function_test.py
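# --- Editor's note: illustrative sketch, not part of the TensorFlow sources ---
# The core pattern the tests above exercise: tf.compat.v1.wrap_function
# traces a TF1-style function once, lifting the variables it creates, and
# returns a callable that can be pruned to an arbitrary feed/fetch subset.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

def f(x):
    v = tf.Variable(2.0)
    return tf.identity(v * x, name='fetch')

wrapped = tf.compat.v1.wrap_function(
    f, [tf.TensorSpec((), tf.float32)])
print(wrapped(tf.constant(3.0)).numpy())  # 6.0

# Prune to the named fetch tensor, keeping the original inputs as feeds.
pruned = wrapped.prune(
    feeds=wrapped.inputs,
    fetches=wrapped.graph.get_tensor_by_name('fetch:0'))
print(pruned(tf.constant(4.0)).numpy())   # 8.0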
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for profiler_client.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import profiler_client from tensorflow.python.eager import test from tensorflow.python.framework import errors from tensorflow.python.framework import test_util class ProfilerClientTest(test_util.TensorFlowTestCase): def testStartTracing_ProcessInvalidAddress(self): with self.assertRaises(errors.UnavailableError): profiler_client.start_tracing('localhost:6006', '/tmp/', 2000) def testMonitor_ProcessInvalidAddress(self): with self.assertRaises(errors.UnavailableError): profiler_client.monitor('localhost:6006', 2000) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/profiler_client_test.py
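# --- Editor's note: illustrative sketch, not part of the TensorFlow sources ---
# The happy path the tests above deliberately avoid: against a live profiler
# server (see start_profiler_server in profiler.py earlier), start_tracing
# collects a trace of the given duration into a logdir. Port, address, and
# logdir here are hypothetical.
from tensorflow.python.eager import profiler, profiler_client

profiler.start_profiler_server(6009)  # serve profiling requests on :6009
profiler_client.start_tracing('localhost:6009', '/tmp/profile_logdir', 2000)
profiler_client.monitor('localhost:6009', 1000)  # poll basic monitoring data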
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Execution Callbacks for Eager Mode.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import functools import enum # pylint: disable=g-bad-import-order import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import execute from tensorflow.python.platform import tf_logging as logging class ExecutionCallback(enum.Enum): """Valid callback actions. These can be passed to `seterr` or `errstate` to create callbacks when specific events occur (e.g. an operation produces `NaN`s). IGNORE: take no action. PRINT: print a warning to `stdout`. RAISE: raise an error (e.g. `InfOrNanError`). WARN: print a warning using `tf.compat.v1.logging.warn`. """ IGNORE = "ignore" PRINT = "print" RAISE = "raise" WARN = "warn" _DEFAULT_CALLBACK_ACTION = ExecutionCallback.RAISE # TODO(cais): Consider moving this exception class to errors_impl.py. class InfOrNanError(Exception): """Exception for inf and/or nan being present in tensor.""" def __init__(self, op_type, op_name, output_index, num_outputs, value): """Constructor of InfOrNanError. Args: op_type: Type name of the op that generated the tensor with `inf`(s) or `nan`(s) (e.g., `Div`). op_name: Name of the op that generated the tensor with `inf`(s) or `nan`(s). This name is set by client and can be `None` if it is unset. output_index: The 0-based output index of the tensor that contains `inf`(s) or `nan`(s). num_outputs: Total number of outputs of the operation. value: The tensor value that contains `inf`(s) or `nan`(s). """ self._op_type = op_type self._op_name = op_name self._output_index = output_index self._num_outputs = num_outputs self._value = value self._total_count = np.size(value) self._inf_count = np.count_nonzero(np.isinf(value)) self._nan_count = np.count_nonzero(np.isnan(value)) super(InfOrNanError, self).__init__(self._get_error_message()) def _get_error_message(self): """Get the error message describing this InfOrNanError object.""" name_str = (("'%s'" % self._op_name) if self._op_name is not None else str(self._op_name)) msg = "Output %d of %d of TFE operation %s (name: %s) contains " % ( self._output_index + 1, self._num_outputs, self._op_type, name_str) if self._inf_count and self._nan_count: msg += "%d inf(s) and %d nan(s) " % (self._inf_count, self._nan_count) elif self._inf_count: msg += "%d inf(s) " % self._inf_count else: msg += "%d nan(s) " % self._nan_count msg += "out of a total of %d element(s). 
Tensor value: %s" % ( self._total_count, self._value) return msg @property def op_type(self): return self._op_type @property def op_name(self): return self._op_name @property def output_index(self): return self._output_index @property def num_outputs(self): return self._num_outputs @property def value(self): return self._value def inf_nan_callback(op_type, inputs, attrs, outputs, op_name, check_inf=True, check_nan=True, action=_DEFAULT_CALLBACK_ACTION): """An execution callback that checks for `inf`s and `nan`s in output tensors. This callback can be used with `tfe.add_execute_callback` to check for invalid numeric values. E.g., ```python tfe.add_execute_callback(tfe.inf_nan_callback) ``` Args: op_type: Name of the TFE operation type (e.g., `MatMul`). inputs: The `list` of input tensors to the operation, currently unused by this callback. attrs: Attributes of the TFE operation, as a tuple of alternating attribute names and attribute values. outputs: The `list` of output tensors from the operation, checked by this callback for `inf` and `nan` values. op_name: Name of the TFE operation. This name is set by client and can be `None` if it unset. check_inf: (`bool`) Whether this callback should check for `inf` values in the output tensor values. check_nan: (`bool`) Whether this callback should check for `nan` values in the output tensor values. action: (`ExecutionCallback`) Action to be taken by the callback when `inf` or `nan` values are detected. Raises: InfOrNanError: iff `inf` or `nan` values are seen in any of `outputs` and `action` is `"raise"`. ValueError: iff the value of `action` is invalid. """ del attrs, inputs # Not used. action = ExecutionCallback(action) ctx = context.context() for index, output in enumerate(outputs): if not output.dtype.is_numpy_compatible: continue numpy_dtype = output.dtype.as_numpy_dtype if (np.issubdtype(numpy_dtype, np.floating) or np.issubdtype(numpy_dtype, np.complex) or np.issubdtype(numpy_dtype, np.integer)): try: check_numerics_op_attrs = ( "message", "Eager-mode inf/nan check", "T", outputs[0].dtype.as_datatype_enum) # TODO(cais): Consider moving this into execute.py. # pylint: disable=protected-access ctx.ensure_initialized() pywrap_tensorflow.TFE_Py_Execute( ctx._handle, output.device, "CheckNumerics", [output], check_numerics_op_attrs, 1) # pylint: enable=protected-access except core._NotOkStatusException: # pylint: disable=protected-access value = output.numpy() inf_detected = np.any(np.isinf(value)) and check_inf nan_detected = np.any(np.isnan(value)) and check_nan if not inf_detected and not nan_detected: continue error = InfOrNanError(op_type, op_name, index, len(outputs), value) if action == ExecutionCallback.PRINT: print("Warning: %s" % str(error)) elif action == ExecutionCallback.WARN: logging.warn(str(error)) elif action == ExecutionCallback.RAISE: raise error else: raise ValueError( "Invalid action for inf_nan_callback: %s. 
Valid actions are: " "{PRINT | WARN | RAISE}" % action) def inf_callback(op_type, inputs, attrs, outputs, op_name, action=_DEFAULT_CALLBACK_ACTION): """A specialization of `inf_nan_callback` that checks for `inf`s only.""" inf_nan_callback( op_type, inputs, attrs, outputs, op_name, check_inf=True, check_nan=False, action=action) def nan_callback(op_type, inputs, attrs, outputs, op_name, action=_DEFAULT_CALLBACK_ACTION): """A specialization of `inf_nan_callback` that checks for `nan`s only.""" inf_nan_callback( op_type, inputs, attrs, outputs, op_name, check_inf=False, check_nan=True, action=action) def add_execution_callback(callback): """Add an execution callback to the default eager context. An execution callback is invoked immediately after an eager operation or function has finished execution, providing access to the op's type, name input and output tensors. Multiple execution callbacks can be added, in which case the callbacks will be invoked in the order in which they are added. To clear all execution callbacks that have been added, use `clear_execution_callbacks()`. Example: ```python def print_even_callback(op_type, inputs, attrs, outputs, op_name): # A callback that prints only the even output values. if outputs[0].numpy() % 2 == 0: print("Even output from %s: %s" % (op_name or op_type, outputs)) tfe.add_execution_callback(print_even_callback) x = tf.pow(2.0, 3.0) - 3.0 y = tf.multiply(x, tf.add(1.0, 5.0)) # When the line above is run, you will see all intermediate outputs that are # even numbers printed to the console. tfe.clear_execution_callbacks() ``` Args: callback: a callable of the signature `f(op_type, inputs, attrs, outputs, op_name)`. `op_type` is the type of the operation that was just executed (e.g., `MatMul`). `inputs` is the `list` of input `Tensor`(s) to the op. `attrs` contains the attributes of the operation as a `tuple` of alternating attribute name and attribute value. `outputs` is the `list` of output `Tensor`(s) from the op. `op_name` is the name of the operation that was just executed. This name is set by the client who created the operation and can be `None` if it is unset. Return value(s) from the callback are ignored. """ execute.execute = execute.execute_with_callbacks context.context().add_post_execution_callback(callback) def clear_execution_callbacks(): """Clear all execution callbacks from the default eager context.""" context.context().clear_post_execution_callbacks() def seterr(inf_or_nan=None): """Set how abnormal conditions are handled by the default eager context. Example: ```python tfe.seterr(inf_or_nan=ExecutionCallback.RAISE) a = tf.constant(10.0) b = tf.constant(0.0) try: c = a / b # <-- Raises InfOrNanError. except Exception as e: print("Caught Exception: %s" % e) tfe.seterr(inf_or_nan=ExecutionCallback.IGNORE) c = a / b # <-- Does NOT raise exception anymore. ``` Args: inf_or_nan: An `ExecutionCallback` determining the action for infinity (`inf`) and NaN (`nan`) values. A value of `None` leads to no change in the action of the condition. Returns: A dictionary of old actions. Raises: ValueError: If the value of any keyword arguments is invalid. """ inf_or_nan = ExecutionCallback(inf_or_nan) if inf_or_nan is not None else None old_settings = {"inf_or_nan": ExecutionCallback.IGNORE} default_context = context.context() carryover_callbacks = [] for callback in default_context.post_execution_callbacks: # Check whether the callback is inf_nan_callback or a partial object of # inf_nan_callback. 
if (callback == inf_nan_callback or isinstance(callback, functools.partial) and callback.func == inf_nan_callback): if callback == inf_nan_callback: old_settings["inf_or_nan"] = _DEFAULT_CALLBACK_ACTION else: old_settings["inf_or_nan"] = callback.keywords.get( "action", _DEFAULT_CALLBACK_ACTION) elif inf_or_nan is not None: carryover_callbacks.append(callback) if inf_or_nan is not None: default_context.clear_post_execution_callbacks() for callback in carryover_callbacks: default_context.add_post_execution_callback(callback) if inf_or_nan != ExecutionCallback.IGNORE: default_context.add_post_execution_callback( functools.partial(inf_nan_callback, action=inf_or_nan)) return old_settings @contextlib.contextmanager def errstate(inf_or_nan=None): """Context manager setting error state. Example: ``` c = tf.math.log(0.) # -inf with errstate(inf_or_nan=ExecutionCallback.RAISE): tf.math.log(0.) # <-- Raises InfOrNanError. ``` Args: inf_or_nan: An `ExecutionCallback` determining the action for infinity (`inf`) and NaN (`nan`) values. A value of `None` leads to no change in the action of the condition. Yields: None. Raises: ValueError: If the value of any keyword arguments is invalid. """ if not context.executing_eagerly(): yield else: old_settings = seterr(inf_or_nan=inf_or_nan) yield seterr(**old_settings)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/execution_callbacks.py
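# --- Editor's note: illustrative sketch, not part of the TensorFlow sources ---
# Exercising the error-state machinery defined above: log(0.) yields -inf,
# which the registered inf/nan callback turns into an InfOrNanError while
# the RAISE action is active inside errstate().
import tensorflow as tf
from tensorflow.python.eager import execution_callbacks

tf.compat.v1.enable_eager_execution()

try:
    with execution_callbacks.errstate(
        inf_or_nan=execution_callbacks.ExecutionCallback.RAISE):
        tf.math.log(0.)  # -inf triggers the callback
except execution_callbacks.InfOrNanError as e:
    print('Caught:', e)  # reports op type, output index, and the bad value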
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.eager import cancellation from tensorflow.python.platform import test class CancellationTest(test.TestCase): def testStartCancel(self): manager = cancellation.CancellationManager() self.assertFalse(manager.is_cancelled) manager.start_cancel() self.assertTrue(manager.is_cancelled) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/cancellation_test.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import config from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad from tensorflow.python.ops import nn_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.util import nest _COS_DERIVATIVES = [math_ops.cos, lambda x: -math_ops.sin(x), lambda x: -math_ops.cos(x), math_ops.sin, math_ops.cos] class FunctionGradientsTest(test.TestCase, parameterized.TestCase): def setUp(self): super(FunctionGradientsTest, self).setUp() cpus = config.list_physical_devices('CPU') # Set 4 virtual CPUs config.set_virtual_device_configuration(cpus[0], [ context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration(), context.VirtualDeviceConfiguration() ]) def testGraphModeWithGradients(self): v = resource_variable_ops.ResourceVariable(1.0, name='v') @def_function.function def step(): def inner(): return v * v return backprop.implicit_grad(inner)()[0][0] self.assertAllEqual(step(), 2.0) def testGraphGradientVariable(self): with ops.Graph().as_default(), self.cached_session(): v = variables.Variable(1.0) @def_function.function def f(): return 2.0 * v node = f() grads, = gradients_impl.gradients(node, v) v.initializer.run() self.assertAllEqual(grads.eval(), 2.0) self.assertEqual(grads.shape, v.shape) def testSymbolicHigherOrder(self): @def_function.function def f(x, order): y = def_function.function(lambda: math_ops.cos(x))() for _ in range(order): y, = gradients_impl.gradients(y, [x]) return y for order, expected in enumerate(_COS_DERIVATIVES): self.assertAllClose( expected(constant_op.constant(1.)), f(constant_op.constant(1.), order)) @parameterized.parameters([dict(persistent=True), dict(persistent=False)]) def testSymbolicHigherOrderUnderTape(self, persistent): @def_function.function def f(x, order): with backprop.GradientTape(persistent=persistent) as tape: tape.watch(x) # Note that having a tape active, even if we don't use it, forces us # down a different 
function call path. Symbolic gradients should work # here too; correctness of tape gradients are tested elsewhere. y = def_function.function(lambda: math_ops.cos(x))() tape_dy = tape.gradient(y, x) for _ in range(order): y, = gradients_impl.gradients(y, [x]) if order > 0: y1 = tape_dy for _ in range(order - 1): y1, = gradients_impl.gradients(y1, [x]) else: y1 = y return y, y1 for order, expected_f in enumerate(_COS_DERIVATIVES): expected = self.evaluate(expected_f(constant_op.constant(1.))) self.assertAllClose( (expected, expected), f(constant_op.constant(1.), order)) def testIteratedGradientsNested(self): def _grad(f): def _grad_function(primal): with backprop.GradientTape() as tape: tape.watch(primal) primal_out = f(primal) return tape.gradient(primal_out, primal) return _grad_function @def_function.function def _forward(x): return math_ops.cos(x) f = _forward traced_f = def_function.function(f) one = constant_op.constant(1.) for expected in _COS_DERIVATIVES: self.assertAllClose(expected(one), f(one)) self.assertAllClose(expected(one), traced_f(one)) self.assertAllClose(expected(one), def_function.function(f)(one)) f = _grad(f) traced_f = def_function.function(_grad(traced_f)) def testIteratedGradientsNestedWithVariable(self): def _grad(f): def _grad_function(): with backprop.GradientTape() as tape: primal_out = f() g, = tape.gradient(primal_out, tape.watched_variables()) return g return _grad_function v = variables.Variable(2.) @def_function.function def _forward(): return math_ops.cos(v) f = _forward two = constant_op.constant(2.) for expected in _COS_DERIVATIVES: self.assertAllClose(expected(two), f()) self.assertAllClose(expected(two), def_function.function(f)()) f = _grad(f) def testIteratedGradientsPersistent(self): @def_function.function def _forward(z): return math_ops.cos(z) f = _forward with backprop.GradientTape(persistent=True) as tape: start = constant_op.constant(1.) tape.watch(start) x = f(start) for expected in _COS_DERIVATIVES: self.assertAllClose(expected(start), x) x = tape.gradient(x, start) def testHigherOrderWithVariable(self): v = variables.Variable(1.) @def_function.function def _forward(): return math_ops.cos(v) f = _forward with backprop.GradientTape(persistent=True) as tape: x = f() for expected in _COS_DERIVATIVES: self.assertAllClose(expected(constant_op.constant(1.)), x) x, = tape.gradient(x, tape.watched_variables()) def testGradientsChained(self): @def_function.function def _forward(z): return math_ops.cos(z) f = _forward x = constant_op.constant(1.) with backprop.GradientTape() as t: t.watch(x) y = f(x) with backprop.GradientTape() as tt: doutputs = constant_op.constant(2.) tt.watch(doutputs) g = t.gradient(y, x, doutputs) self.assertAllClose(-2. * math_ops.sin(x), g) gg = tt.gradient(g, doutputs) # We're taking gradients with respect to doutputs, which is just a linear # function of the gradient. 
self.assertAllClose(-math_ops.sin(x), gg) def testSymGradGatherNd(self): with ops.Graph().as_default(), self.cached_session(): @def_function.function def f(x): return array_ops.gather_nd(x, [[0]]) c = constant_op.constant([[2.]]) f_c = f(c) g, = gradients_impl.gradients(f_c, c) self.assertAllEqual(self.evaluate(g).values, [[1.0]]) def testNoSymGradNestedDefun(self): @def_function.function def outer(): @def_function.function def f(x): return array_ops.gather_nd(x, [[0]]) c = constant_op.constant([[2.]]) f_c = f(c) g, = gradients_impl.gradients(f_c, c) self.assertIsInstance(g, ops.IndexedSlices) outer() def testGraphFunctionWithGradients(self): v = resource_variable_ops.ResourceVariable(1.0, name='v') @def_function.function def step(): def inner(): return v * v return backprop.implicit_grad(inner)()[0][0] step_op = step.get_concrete_function() self.assertEqual(step_op.output_dtypes, dtypes.float32) self.assertEqual(step_op.output_shapes, tensor_shape.TensorShape([])) self.assertAllEqual(step_op(), 2.0) @test_util.run_in_graph_and_eager_modes() def testDefunCondGradient(self): @def_function.function def f(x): return control_flow_ops.cond(x > 0.5, lambda: 2 * x, lambda: 3 * x) with backprop.GradientTape() as t: x = constant_op.constant(1.0) t.watch(x) y = f(x) self.assertAllEqual(self.evaluate(t.gradient(y, x)), 2.0) @test_util.run_in_graph_and_eager_modes() def testGraphLoopGradient(self): @def_function.function def f(x): return control_flow_ops.while_loop(lambda _, i: i < 2, lambda x, i: (2*x, i + 1), [x, 0])[0] with backprop.GradientTape() as t: x = constant_op.constant(1.0) t.watch(x) y = f(x) self.assertAllEqual(self.evaluate(t.gradient(y, x)), 4.0) def testGraphLoopGradientInsideSession(self): with ops.Graph().as_default(): n = constant_op.constant(2.0) x = array_ops.placeholder(dtypes.float32, shape=None) @def_function.function def f(): c = lambda n: n < 10 b = lambda n: n * x return control_flow_ops.while_loop(c, b, [n], [tensor_shape.unknown_shape()]) l = f() dx = gradients_impl.gradients(l, [x])[0] with self.cached_session(): self.assertEqual(dx.eval(feed_dict={x: 2.0}), 24.0) def testDefunDifferentiable(self): v = resource_variable_ops.ResourceVariable(1.0) @def_function.function def f(): return v * v self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0) def testDefunCanBeDifferentiatedTwice(self): v = resource_variable_ops.ResourceVariable(1.0) @def_function.function def f(): return v * v self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0) # Ensure that v is watched again. 
self.assertAllEqual(backprop.implicit_grad(f)()[0][0], 2.0) def testSymbolicGradientVariableNoneNotZerosLike(self): with ops.Graph().as_default(): v = variables.Variable(1.0) @def_function.function def f(x, v): v.read_value() return x * x x = constant_op.constant(1.0) l = f(x, v) _, dv = gradients_impl.gradients(l, [x, v]) with self.cached_session(): v.initializer.run() self.assertEqual(dv, None) def testDefunCallBackprop(self): @def_function.function def f(x): return math_ops.add(x, x) @def_function.function def g(x): return backprop.gradients_function(f, [0])(x)[0] self.assertAllEqual(2, g(constant_op.constant(2.))) @test_util.run_v1_only('b/120545219') def testGraphModeEagerGradError(self): with context.graph_mode(): def f(): x = variable_scope.get_variable( 'v', initializer=constant_op.constant(1.0)) return x * constant_op.constant(2.0) with self.assertRaisesRegexp(ValueError, 'No trainable variables were accessed'): backprop.implicit_val_and_grad(f)() def testDefunCallBackpropUsingSameObjectForMultipleArguments(self): @def_function.function def g(x): return backprop.gradients_function(math_ops.multiply, [0, 1])(x, x) def np_g(x): return [d.numpy() for d in g(x)] x = constant_op.constant(1.) self.assertAllEqual([1., 1.], np_g(x)) self.assertAllEqual([1., 1.], np_g(1.)) def testGradientTensorConversionWithDefun(self): three = resource_variable_ops.ResourceVariable(3.0, name='v') @def_function.function def f(x): return math_ops.add(x, three) def g(x): return f(x) g = backprop.implicit_grad(g)(constant_op.constant(1.0))[0][0] self.assertAllEqual(g, 1.0) def testGradient(self): matmul = def_function.function(math_ops.matmul) def sq(x): return matmul(x, x, transpose_a=True) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) grad_t, = backprop.gradients_function(sq, [0])(t) self.assertAllEqual(grad_t, [[6, 6], [14, 14]]) def testGradientInFunction(self): @def_function.function def f(x): return backprop.gradients_function(lambda y: y * y, [0])(x)[0] self.assertAllEqual(f(constant_op.constant(1.0)), 2.0) def testGradientOfGatherWithDefun(self): v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0]) def sum_gather(): return math_ops.reduce_sum(array_ops.gather(v, [1, 2])) grad_fn = backprop.implicit_grad(sum_gather) gradient = grad_fn() defun_grad_fn = backprop.implicit_grad(def_function.function(sum_gather)) defun_gradient = defun_grad_fn() self.assertEqual(len(gradient), len(defun_gradient)) gradient = gradient[0][0] defun_gradient = defun_gradient[0][0] self.assertAllEqual(gradient.values, defun_gradient.values) self.assertAllEqual(gradient.indices, defun_gradient.indices) self.assertAllEqual(gradient.dense_shape, defun_gradient.dense_shape) def testDifferentiableFunctionNoneOutputs(self): @def_function.function def my_function(x): return x, None def wrapper(x): return my_function(x)[0] g = backprop.gradients_function(wrapper, [0])(constant_op.constant(0.0)) self.assertAllEqual(g[0], 1.) 
@def_function.function def foo(a): return None, a * a x = constant_op.constant(5.0) with backprop.GradientTape() as tp: tp.watch(x) none, r = foo(x) g = tp.gradient(r, x) self.assertIs(none, None) self.assertAllEqual(r, 25.0) self.assertAllEqual(g, 2 * 5.0) @test_util.run_in_graph_and_eager_modes def testNestedDifferentiableFunction(self): @def_function.function def inner_fn(a, b): return a * math_ops.add(a, b) @def_function.function def outer_fn(x): return inner_fn(x, 1.0) x = constant_op.constant(5.0) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunction(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def outer_fn(x): return middle_fn(x, 1.0) x = constant_op.constant(5.0) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionWithMultipleGradCalls(self): @def_function.function def inner_fn(a, b): return math_ops.add(a, b) @def_function.function def middle_fn(a, b): return math_ops.mul(a, inner_fn(a, b)) @def_function.function def outer_fn(x): return middle_fn(x, 3.0) x = constant_op.constant(5.0) self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0)) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0)) self.assertAllEqual(middle_fn(3.0, x), 3.0 * (3.0 + 5.0)) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) y = constant_op.constant(4.0) with backprop.GradientTape() as tp: tp.watch(y) result = outer_fn(y) grad = tp.gradient(result, y) self.assertAllEqual(grad, 2 * 4.0 + 3.0) with backprop.GradientTape() as tp: tp.watch(y) result = inner_fn(y, y) grad = tp.gradient(result, y) self.assertAllEqual(grad, 2.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionGradientTapeInDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def outer_fn(x): with backprop.GradientTape() as tp: tp.watch(x) result = middle_fn(x, 1.0) grad = tp.gradient(result, x) return grad x = constant_op.constant(5.0) grad = outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionGradientTapeInNestedDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def almost_outer_fn(x): with backprop.GradientTape() as tp: tp.watch(x) result = middle_fn(x, 1.0) grad = tp.gradient(result, x) return grad @def_function.function def outer_fn(x): return almost_outer_fn(x) x = constant_op.constant(5.0) grad = outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) 
@test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionGradientTapeInMultNestedDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def almost_outer_fn(x): with backprop.GradientTape() as tp: tp.watch(x) result = middle_fn(x, 1.0) grad = tp.gradient(result, x) return grad @def_function.function def outer_fn(x): return almost_outer_fn(x) @def_function.function def outer_outer_fn(x): return outer_fn(x) x = constant_op.constant(5.0) grad = outer_outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionTFGradientInDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def outer_fn(x): result = middle_fn(x, 1.0) return gradients_impl.gradients(result, [x])[0] x = constant_op.constant(5.0) grad = outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionTFGradientInNestedDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def almost_outer_fn(x): result = middle_fn(x, 1.0) return gradients_impl.gradients(result, [x])[0] @def_function.function def outer_fn(x): return almost_outer_fn(x) x = constant_op.constant(5.0) grad = outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) @test_util.run_in_graph_and_eager_modes def testDeeplyNestedDifferentiableFunctionTFGradientInMultNestedDefun(self): @def_function.function def inner_inner_fn(a, b): return math_ops.add(a, b) @def_function.function def inner_fn(a, b): return inner_inner_fn(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def almost_outer_fn(x): result = middle_fn(x, 1.0) return gradients_impl.gradients(result, [x])[0] @def_function.function def outer_fn(x): return almost_outer_fn(x) @def_function.function def outer_outer_fn(x): return outer_fn(x) x = constant_op.constant(5.0) grad = outer_outer_fn(x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) def testDeeplyNestedDifferentiableFunctionWithVariable(self): var = variables.Variable(constant_op.constant(1.0)) @def_function.function def inner_fn(a, b): return math_ops.add(a, b) @def_function.function def middle_fn(a, b): return a * inner_fn(a, b) @def_function.function def outer_fn(x): return middle_fn(x, var) x = constant_op.constant(5.0) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 1.0) def testDeeplyNestedDifferentiableFunctionWithVariableMultipleGradCalls(self): v = variables.Variable(constant_op.constant(3.0)) @def_function.function def inner_fn(a, b): return math_ops.add(a, b) @def_function.function def middle_fn(a, b): return math_ops.mul(a, inner_fn(a, b)) @def_function.function def outer_fn(x): return middle_fn(x, v) x = constant_op.constant(5.0) self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0)) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, 
x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0)) self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0)) with backprop.GradientTape() as tp: tp.watch(x) result = outer_fn(x) grad = tp.gradient(result, x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) y = constant_op.constant(4.0) with backprop.GradientTape() as tp: tp.watch(y) result = outer_fn(y) grad = tp.gradient(result, y) self.assertAllEqual(grad, 2 * 4.0 + 3.0) v.assign(constant_op.constant(1.5)) with backprop.GradientTape() as tp: tp.watch(y) result = outer_fn(y) grad = tp.gradient(result, y) self.assertAllEqual(grad, 2 * 4.0 + 1.5) with backprop.GradientTape() as tp: tp.watch(y) result = inner_fn(y, v) grad = tp.gradient(result, y) self.assertAllEqual(grad, 1.0) def testDeeplyNestedDifferentiableFunctionWithVariableMultipleTFGrads(self): with context.graph_mode(), self.cached_session(): v = resource_variable_ops.ResourceVariable(3.0) v.initializer.run() @def_function.function def inner_fn(a, b): return math_ops.add(a, b) @def_function.function def middle_fn(a, b): return math_ops.mul(a, inner_fn(a, b)) @def_function.function def outer_fn(x): return middle_fn(x, v) x = constant_op.constant(5.0) self.assertAllEqual(outer_fn(x).eval(), 5.0 * (5.0 + 3.0)) grad, = gradients_impl.gradients(outer_fn(x), x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0)) self.assertAllEqual(middle_fn(v, x), 3.0 * (3.0 + 5.0)) grad, = gradients_impl.gradients(outer_fn(x), x) self.assertAllEqual(grad, 2 * 5.0 + 3.0) y = constant_op.constant(4.0) grad, = gradients_impl.gradients(outer_fn(y), y) self.assertAllEqual(grad, 2 * 4.0 + 3.0) self.evaluate(v.assign(constant_op.constant(1.5))) grad, = gradients_impl.gradients(outer_fn(y), y) self.assertAllEqual(grad, 2 * 4.0 + 1.5) grad, = gradients_impl.gradients(inner_fn(y, v), y) self.assertAllEqual(grad, 1.0) def testNestedDifferentiableFunctionNoneOutputs(self): @def_function.function def foo(a, b): return None, a * math_ops.add(a, b), None, 2*a @def_function.function def bar(x): return foo(x, 1.0) x = constant_op.constant(5.0) with backprop.GradientTape(persistent=True) as tp: tp.watch(x) none1, r1, none2, r2 = bar(x) g1 = tp.gradient(r1, x) g2 = tp.gradient(r2, x) self.assertAllEqual(r1, 30.0) self.assertAllEqual(r2, 10.0) self.assertIs(none1, None) self.assertIs(none2, None) self.assertAllEqual(g1, 2 * 5.0 + 1.0) self.assertAllEqual(g2, 2.0) def testGradientWithKeywordArguments(self): matmul = def_function.function(math_ops.matmul) def sq(x): return matmul(a=x, b=x, transpose_a=True) t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]]) grad_t, = backprop.gradients_function(sq, [0])(t) self.assertAllEqual(grad_t, [[6, 6], [14, 14]]) with backprop.GradientTape(persistent=True) as tape: tape.watch(t) one = matmul(t, b=t, transpose_a=True) two = matmul(b=t, a=t, transpose_a=True) three = matmul(a=t, b=t, transpose_a=True) for output in [one, two, three]: self.assertAllEqual(tape.gradient(output, t), [[6, 6], [14, 14]]) def testGradientInFunctionWithKeywordArguments(self): @def_function.function def f(x): return backprop.gradients_function(lambda y: y * y, [0])(x)[0] self.assertAllEqual(f(x=constant_op.constant(1.0)), 2.0) def testFunctionHasNoSecondOrderGradient(self): # This test needs nn_grad imported. We could just disable the lint error, # but this way if the test is deleted we'll know the import isn't needed. _ = nn_grad v = variables.Variable(1.) 
@def_function.function def f(labels, logits): return def_function.function( nn_ops.sparse_softmax_cross_entropy_with_logits)( labels=labels, logits=logits + v) @def_function.function def f_grad(): with backprop.GradientTape() as tape: logits = constant_op.constant([1., 2.]) tape.watch(logits) out = f(constant_op.constant(1), logits) return tape.gradient(out, logits) # Mainly we want to check that the function builds despite # sparse_softmax_cross_entropy_with_logits not having a second-order # gradient defined. self.assertAllEqual([2], f_grad().shape) @test_util.run_in_graph_and_eager_modes def testBackwardNone(self): model = variables.Variable(1.0, name='model') count = variables.Variable(0) @function.defun def forward_pass(value): count.assign_add(1) residuals = value - model loss = 0.5 * math_ops.reduce_mean(math_ops.pow(residuals, 2)) # Note: count is an integer, so its doutput will be None return loss, count def reduce_fn(x): if context.executing_eagerly(): with backprop.GradientTape() as t: loss, count = forward_pass(x) return t.gradient(loss, model), count loss, count = forward_pass(x) grad_only = gradients_impl.gradients(loss, model) return grad_only, count g, _ = reduce_fn(constant_op.constant([7.0])) self.evaluate(variables.global_variables_initializer()) self.assertAllEqual(nest.flatten(self.evaluate(g)), [-6.0]) if __name__ == '__main__': ops.enable_eager_execution() test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/function_gradients_test.py
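# --- Editor's note: illustrative sketch, not part of the TensorFlow sources ---
# The basic pattern the gradient tests above revolve around: differentiating
# through a def_function.function-compiled callable with GradientTape.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

@tf.function
def f(x):
    return x * x + 3.0 * x

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = f(x)
print(tape.gradient(y, x).numpy())  # d/dx (x^2 + 3x) at x=2 -> 7.0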
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Code for backpropagation using the tape utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import operator import sys import six from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import execute from tensorflow.python.eager import imperative_grad from tensorflow.python.eager import tape from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_inspect from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import tf_export # Note that we need to lazy load the following two modules to avoid creating # circular dependencies. # TODO(b/119775953): fix the circular dependencies. 
pfor_ops = LazyLoader( "pfor_ops", globals(), "tensorflow.python.ops.parallel_for.control_flow_ops") function = LazyLoader("function", globals(), "tensorflow.python.eager.function") _op_attr_type_cache = {} def op_attr_type(op_type, attr_name): try: return _op_attr_type_cache[(op_type, attr_name)] except KeyError: context.ensure_initialized() h = context.context()._handle # pylint: disable=protected-access attr_type = pywrap_tensorflow.TFE_OpNameGetAttrType(h, op_type, attr_name) _op_attr_type_cache[(op_type, attr_name)] = attr_type return attr_type def make_attr(attr_type, value): if attr_type == pywrap_tensorflow.TF_ATTR_TYPE: return dtypes.as_dtype(value) elif attr_type == [pywrap_tensorflow.TF_ATTR_TYPE]: return [dtypes.as_dtype(v) for v in value] elif attr_type == pywrap_tensorflow.TF_ATTR_SHAPE: return tensor_shape.as_shape(value).as_proto() elif attr_type == [pywrap_tensorflow.TF_ATTR_SHAPE]: return [tensor_shape.as_shape(v).as_proto() for v in value] elif isinstance(value, str): return value.encode() return value class _MockOp(object): """Pretends to be a tf.Operation for the gradient functions.""" def __init__(self, attrs, inputs, outputs, typ, skip_input_indices): self.attrs = attrs self.inputs = inputs self.outputs = outputs self.type = typ self.skip_input_indices = skip_input_indices def get_attr(self, attr): typ = op_attr_type(self.type, attr) for i in range(0, len(self.attrs), 2): if self.attrs[i] == attr: return make_attr(typ, self.attrs[i + 1]) raise KeyError(attr) def _get_control_flow_context(self): raise NotImplementedError( "tf.GradientTape.gradients() does not support graph control flow " "operations like tf.cond or tf.while at this time. Use tf.gradients() " "instead. If you need this feature, please file a feature request at " "https://github.com/tensorflow/tensorflow/issues/new" ) def _gradient_function(op_name, attr_tuple, num_inputs, inputs, outputs, out_grads, skip_input_indices): """Calls the gradient function of the op. Args: op_name: the name of the op to be differentiated. attr_tuple: the attrs, as a tuple. num_inputs: the number of inputs to the op. inputs: inputs to the original operation. outputs: outputs to the original operation. out_grads: gradients of the operation wrt its outputs. skip_input_indices: a tuple that is passed to the gradient function, indicating which inputs to skip calculating the gradient for Returns: The gradients with respect to the inputs of the function, as a list. """ mock_op = _MockOp(attr_tuple, inputs, outputs, op_name, skip_input_indices) grad_fn = ops._gradient_registry.lookup(op_name) # pylint: disable=protected-access if grad_fn is None: return [None] * num_inputs return grad_fn(mock_op, *out_grads) pywrap_tensorflow.TFE_Py_RegisterGradientFunction(_gradient_function) def _record_gradient(op_name, inputs, attrs, results, name): return pywrap_tensorflow.TFE_Py_RecordGradient(op_name, inputs, attrs, results, name) execute.record_gradient = _record_gradient def implicit_val_and_grad(f): """Returns a function which differentiates f with respect to variables. The wrapped function returns the value and the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by `f`. This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: ```python dense_layer = tf.compat.v1.layers.Dense(1) def loss(x, y): return tf.reduce_sum(tf.square(dense_layer(x) - y)) # Obtain the gradient function. 
val_grad_fn = tfe.implicit_value_and_gradients(loss) # Invoke the gradient function with concrete values of x and y. x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) y = tf.constant([[10.0], [20.0]]) value, grads_and_vars = val_grad_fn(x, y) print('Value of loss: %s' % value) # Apply the gradients to Variables. optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) optimizer.apply_gradients(grads_and_vars) ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. Returns: A function which, when called, returns a pair. Its first element is the value to which the function evaluates. Its second element is a list of (gradient, variable) pairs. Raises: ValueError: if `f` returns None. """ # TODO(cais): Remove calls to tf.constant() once the gradients functions # accept lists and np.ndarrays. def grad_fn(*args, **kwds): """Computes the gradient of the wrapped function.""" this_tape = tape.push_new_tape() try: end_node = f(*args, **kwds) if end_node is None: raise ValueError("Cannot differentiate a function that returns None; " "did you forget to return a value from {}?".format( f.__name__)) finally: tape.pop_tape(this_tape) # Note: variables are returned in construction order. This ensures unique # order across executions. variables = this_tape.watched_variables() if not variables: raise ValueError("No trainable variables were accessed while the " "function was being computed.") sources = [v.handle for v in variables] grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node), sources) return end_node, list(zip(grad, variables)) return grad_fn def implicit_grad(f): """Returns a function which differentiates f with respect to variables. The wrapped function returns the gradient of f when called with the same arguments. The gradient is with respect to all trainable TFE variables accessed by `f`. This function is useful when the exact set of variables to differentiate with is not known ahead of time. Example: ```python dense_layer = tf.compat.v1.layers.Dense(1) def loss(x, y): return tf.reduce_sum(tf.square(dense_layer(x) - y)) # Obtain the gradient function. grad_fn = tfe.implicit_gradients(loss) # Invoke the gradient function with concrete values of x and y. x = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) y = tf.constant([[10.0], [20.0]]) grads_and_vars = grad_fn(x, y) # Apply the gradients to Variables. optimizer = tf.compat.v1.train.GradientDescentOptimizer(0.1) optimizer.apply_gradients(grads_and_vars) ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. Returns: A function which, when called, returns a list of (gradient, variable) pairs. """ # TODO(cais): Remove calls to tf.constant() once the gradients functions # accept lists and np.ndarrays. def grad_fn(*args, **kwds): """Computes the gradient of the wrapped function.""" return implicit_val_and_grad(f)(*args, **kwds)[1] return grad_fn def _get_arg_spec(f, params, param_args): """The positions of the parameters of f to be differentiated in param_args.""" try: args = tf_inspect.getfullargspec(f).args except TypeError as e: # TypeError can happen when f is a callable object.
if params is None: return range(len(param_args)) elif all(isinstance(x, int) for x in params): return params raise ValueError("Either callable provided is not a function or could not " "inspect its arguments by name: %s. Original error: %s" % (f, e)) if params is None: if not args: return range(len(param_args)) return range(len(args)) elif all(isinstance(x, six.string_types) for x in params): return [args.index(n) for n in params] elif all(isinstance(x, int) for x in params): return params else: raise ValueError( "params must be all strings or all integers; got %s." % params) def gradients_function(f, params=None): """Returns a function which differentiates f with respect to params. Example: ```python # f(x, y) = (x ^ 3) * y - x * (y ^ 2) # Therefore, the 1st order derivatives are: # df / dx = 3 * (x ^ 2) * y - y ^ 2 # df / dy = x ^ 3 - 2 * x * y # The 2nd order derivative with respect to x is: # d^2 f / (dx)^2 = 6 * x * y def f(x, y): return x * x * x * y - x * y * y # Obtain a function that returns 1st order gradients. grad_fn = tfe.gradients_function(f) x = 2.0 y = 3.0 # Invoke the 1st order gradient function. x_grad, y_grad = grad_fn(x, y) assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 # Obtain a function that returns the 2nd order gradient with respect to x. gradgrad_fn = tfe.gradients_function(lambda x, y: grad_fn(x, y)[0]) # Invoke the 2nd order gradient function. x_gradgrad = gradgrad_fn(x, y)[0] assert x_gradgrad.numpy() == 6 * 2 * 3 # To obtain a callable that returns the gradient(s) of `f` with respect to a # subset of its inputs, use the `params` keyword argument with # `gradients_function()`. ygrad_fn = tfe.gradients_function(f, params=[1]) (y_grad,) = ygrad_fn(x, y) assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 ``` Note that only tensors with real or complex dtypes are differentiable. Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the `dy` keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing None differentiates with respect to all parameters. Returns: function which, when called, returns the gradient of `f` with respect to all of `params`. The function takes an extra optional keyword argument `dy`. Setting it allows computation of vector jacobian products for vectors other than the vector of ones. Raises: ValueError: if the params are not all strings or all integers. """ def decorated(*args, **kwds): """Computes the gradient of the decorated function.""" _, grad = val_and_grad_function(f, params=params)(*args, **kwds) return grad return decorated def _ensure_unique_tensor_objects(parameter_positions, args): """Make each of the parameter_positions in args a unique ops.Tensor object. Ensure that each parameter is treated independently. For example: def f(x, y): return x * y g = gradients_function(f) one = tf.constant(1.) g(one, one) should return [1., 1.] (even though the two arguments are the same Tensor object). Args: parameter_positions: List of indices into args defining the arguments to differentiate against. args: A list of arguments to the function to be differentiated. Returns: args, possibly edited in-place.
""" s = set() for (i, t) in enumerate(args): if i in parameter_positions: tid = ops.tensor_id(t) if tid in s: args[i] = gen_array_ops.identity(args[i]) else: s.add(tid) return args def val_and_grad_function(f, params=None): """Returns a function that computes f and its derivative w.r.t. params. Example: ```python # f(x, y) = (x ^ 3) * y - x * (y ^ 2) # Therefore, the 1st order derivatives are: # df / dx = 3 * (x ^ 2) * y - y ^ 2 # df / dy = x ^ 3 - 2 * x * y def f(x, y): return x * x * x * y - x * y * y # Obtain a function that returns the function value and the 1st order # gradients. val_grads_fn = tfe.value_and_gradients_function(f) x = 2.0 y = 3.0 # Invoke the value-and-gradients function. f_val, (x_grad, y_grad) = val_grads_fn(x, y) assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) assert x_grad.numpy() == 3 * (2 ** 2) * 3 - 3 ** 2 assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 # To obtain a callable that returns the value of `f` and the gradient(s) of # `f` with respect to a subset of its inputs, use the `params` keyword # argument with `value_and_gradients_function()`. val_ygrad_fn = tfe.value_and_gradients_function(f, params=[1]) f_val, (y_grad,) = val_ygrad_fn(x, y) assert f_val.numpy() == (2 ** 3) * 3 - 2 * (3 ** 2) assert y_grad.numpy() == (2 ** 3) - 2 * 2 * 3 ``` Args: f: function to be differentiated. If `f` returns a scalar, this scalar will be differentiated. If `f` returns a tensor or list of tensors, by default a scalar will be computed by adding all their values to produce a single scalar. If desired, the tensors can be elementwise multiplied by the tensors passed as the `dy` keyword argument to the returned gradient function. params: list of parameter names of f or list of integers indexing the parameters with respect to which we'll differentiate. Passing `None` differentiates with respect to all parameters. Returns: function which, when called, returns the value of f and the gradient of f with respect to all of `params`. The function takes an extra optional keyword argument "dy". Setting it allows computation of vector jacobian products for vectors other than the vector of ones. Raises: ValueError: if the params are not all strings or all integers. """ def decorated(*args, **kwds): """Computes the value and gradient of the decorated function.""" dy = kwds.pop("dy", None) if kwds: raise ValueError("Functions to be differentiated cannot " "receive keyword arguments.") val, vjp = make_vjp(f, params)(*args, **kwds) return val, vjp(dy=dy) return decorated def make_vjp(f, params=None, persistent=True): """Returns a function that computes f and its vjp w.r.t. params. The term "vjp" here is an abbreviation for vector-jacobian product. Args: f: the function to be differentiated. params: the parameters (numbers or names) to differentiate with respect to. A value of None will differentiate with respect to all parameters. persistent: Boolean controlling whether the VJP function can be re-used. Must be True or False. Returns: A function, which when called, returns a tuple (value, vjp), where: - value is the result of calling f. - vjp is a function, which takes a vector as an argument and returns the product of that vector with the Jacobian of f. Providing no argument to vjp is equivalent to providing a vector of ones. For example, ```python def f(x): return x * x wrapped_fn = tfe.make_vjp(f) result, vjp = wrapped_fn(tf.constant(3.0)) # result is 9.0 vjp() # the vjp function rturns 6.0 Raises: ValueError: if `f` returns None. 
""" def decorated(*args, **kwds): """Computes the value and gradient of the decorated function.""" parameter_positions = _get_arg_spec(f, params, args) assert not kwds, "The gradient function can't take keyword arguments." this_tape = tape.push_new_tape(persistent=persistent) try: sources = [] args = [ ops.convert_to_tensor(arg) if i in parameter_positions else arg for i, arg in enumerate(args) ] args = _ensure_unique_tensor_objects(parameter_positions, args) for i in parameter_positions: sources.append(args[i]) tape.watch(this_tape, args[i]) result = f(*args) if result is None: raise ValueError("Cannot differentiate a function that returns None; " "did you forget to return a value from {}?".format( f.__name__)) flat_result = nest.flatten(result) flat_result = [gen_array_ops.identity(x) for x in flat_result] result = nest.pack_sequence_as(result, flat_result) finally: tape.pop_tape(this_tape) def vjp(dy=None): if dy is not None: dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)] return imperative_grad.imperative_grad( this_tape, nest.flatten(result), sources, output_gradients=dy) return result, vjp return decorated def flatten_nested_indexed_slices(grad): assert isinstance(grad, ops.IndexedSlices) if isinstance(grad.values, ops.Tensor): return grad else: assert isinstance(grad.values, ops.IndexedSlices) g = flatten_nested_indexed_slices(grad.values) return ops.IndexedSlices(g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape) def aggregate_indexed_slices_gradients(grads): """Aggregates gradients containing `IndexedSlices`s.""" if len(grads) < 1: return None elif len(grads) == 1: return grads[0] else: grads = [g for g in grads if g is not None] # If any gradient is a `Tensor`, sum them up and return a dense tensor # object. if any(isinstance(g, ops.Tensor) for g in grads): return math_ops.add_n(grads) # The following `_as_indexed_slices_list` casts ids of IndexedSlices into # int64. It is to make sure the inputs of `concat` all have same the data # type. grads = math_ops._as_indexed_slices_list(grads) # pylint: disable=protected-access grads = [flatten_nested_indexed_slices(x) for x in grads] # Form IndexedSlices out of the concatenated values and indices. concat_grad = ops.IndexedSlices( array_ops.concat([x.values for x in grads], axis=0), array_ops.concat([x.indices for x in grads], axis=0), grads[0].dense_shape) return concat_grad def _aggregate_grads(gradients): """Aggregate gradients from multiple sources. Args: gradients: A list of 'Tensor' or 'IndexedSlices' gradients. Returns: If 'gradients' only has 'Tensor', returns an aggregated 'Tensor'. Otherwise returns an aggregated 'IndexedSlices'. 
""" assert gradients, "No gradients to aggregate" if len(gradients) == 1: return gradients[0] if all(isinstance(g, ops.Tensor) for g in gradients): return gen_math_ops.add_n(gradients) else: assert all(isinstance(g, (ops.Tensor, ops.IndexedSlices)) for g in gradients) return aggregate_indexed_slices_gradients(gradients) def _num_elements(grad): """The number of elements in the `grad` tensor.""" if isinstance(grad, ops.Tensor): shape_tuple = grad._shape_tuple() # pylint: disable=protected-access if shape_tuple is None or None in shape_tuple: return 0 return functools.reduce(operator.mul, shape_tuple, 1) if isinstance(grad, ops.IndexedSlices): return functools.reduce(operator.mul, grad.values._shape_tuple(), 1) # pylint: disable=protected-access raise ValueError("`grad` not a Tensor or IndexedSlices.") def _fast_fill(value, shape, dtype): return array_ops.fill( constant_op.constant(shape, dtype=dtypes.int32), constant_op.constant(value, dtype=dtype)) def _zeros(shape, dtype): """Helper to return (possibly cached) zero tensors in eager mode.""" if (dtype == dtypes.variant or dtype == dtypes.string or dtype == dtypes.resource): # TODO(apassos): need to save enough information about variant tensors to do # a zeros return None ctx = context.context() if not ctx.executing_eagerly(): return array_ops.zeros(shape, dtype) device = ctx.device_name if tensor_util.is_tensor(shape): shape_key = shape.experimental_ref() else: shape_key = shape cache_key = shape_key, dtype, device cached = ctx.zeros_cache().get(cache_key) if cached is None: if dtypes.as_dtype(dtype).is_bool: value = False else: value = 0 cached = _fast_fill(value, shape, dtype) ctx.zeros_cache().put(cache_key, cached) return cached def _ones(shape, dtype): as_dtype = dtypes.as_dtype(dtype) if as_dtype == dtypes.string: return None if not context.context().executing_eagerly(): return array_ops.ones(shape, dtype) if as_dtype.is_bool: value = True else: value = 1 if shape == (): # pylint: disable=g-explicit-bool-comparison return constant_op.constant(value, dtype=dtype) return _fast_fill(value, shape, dtype) _default_vspace = imperative_grad.VSpace( num_elements_fn=_num_elements, aggregate_fn=_aggregate_grads, zeros_fn=_zeros, ones_fn=_ones, graph_shape_fn=gen_array_ops.shape) pywrap_tensorflow.TFE_Py_RegisterVSpace(_default_vspace) def _handle_or_self(x): """If x is ResourceVariable, return its handle, else x.""" if resource_variable_ops.is_resource_variable(x): x = x.handle return x @tf_export("GradientTape") class GradientTape(object): """Record operations for automatic differentiation. Operations are recorded if they are executed within this context manager and at least one of their inputs is being "watched". Trainable variables (created by `tf.Variable` or `tf.compat.v1.get_variable`, where `trainable=True` is default in both cases) are automatically watched. Tensors can be manually watched by invoking the `watch` method on this context manager. For example, consider the function `y = x * x`. The gradient at `x = 3.0` can be computed as: ```python x = tf.constant(3.0) with tf.GradientTape() as g: g.watch(x) y = x * x dy_dx = g.gradient(y, x) # Will compute to 6.0 ``` GradientTapes can be nested to compute higher-order derivatives. 
For example, ```python x = tf.constant(3.0) with tf.GradientTape() as g: g.watch(x) with tf.GradientTape() as gg: gg.watch(x) y = x * x dy_dx = gg.gradient(y, x) # Will compute to 6.0 d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0 ``` By default, the resources held by a GradientTape are released as soon as GradientTape.gradient() method is called. To compute multiple gradients over the same computation, create a persistent gradient tape. This allows multiple calls to the gradient() method as resources are released when the tape object is garbage collected. For example: ```python x = tf.constant(3.0) with tf.GradientTape(persistent=True) as g: g.watch(x) y = x * x z = y * y dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3) dy_dx = g.gradient(y, x) # 6.0 del g # Drop the reference to the tape ``` By default GradientTape will automatically watch any trainable variables that are accessed inside the context. If you want fine grained control over which variables are watched you can disable automatic tracking by passing `watch_accessed_variables=False` to the tape constructor: ```python with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(variable_a) y = variable_a ** 2 # Gradients will be available for `variable_a`. z = variable_b ** 3 # No gradients will be available since `variable_b` is # not being watched. ``` Note that when using models you should ensure that your variables exist when using `watch_accessed_variables=False`. Otherwise it's quite easy to make your first iteration not have any gradients: ```python a = tf.keras.layers.Dense(32) b = tf.keras.layers.Dense(32) with tf.GradientTape(watch_accessed_variables=False) as tape: tape.watch(a.variables) # Since `a.build` has not been called at this point # `a.variables` will return an empty list and the # tape will not be watching anything. result = b(a(inputs)) tape.gradient(result, a.variables) # The result of this computation will be # a list of `None`s since a's variables # are not being watched. ``` Note that only tensors with real or complex dtypes are differentiable. """ def __init__(self, persistent=False, watch_accessed_variables=True): """Creates a new GradientTape. Args: persistent: Boolean controlling whether a persistent gradient tape is created. False by default, which means at most one call can be made to the gradient() method on this object. watch_accessed_variables: Boolean controlling whether the tape will automatically `watch` any (trainable) variables accessed while the tape is active. Defaults to True meaning gradients can be requested from any result computed in the tape derived from reading a trainable `Variable`. If False users must explicitly `watch` any `Variable`s they want to request gradients from. 
""" self._tape = None self._persistent = persistent self._watch_accessed_variables = watch_accessed_variables self._recording = False self._created_eagerly = context.executing_eagerly() if self._created_eagerly: context.ensure_initialized() context.context().start_step() def __enter__(self): """Enters a context inside which operations are recorded on this tape.""" self._push_tape() return self def __exit__(self, typ, value, traceback): """Exits the recording context, no further operations are traced.""" if self._recording: self._pop_tape() def _push_tape(self): if self._recording: raise ValueError("Tape is already recording.") if self._tape is None: self._tape = tape.push_new_tape( persistent=self._persistent, watch_accessed_variables=self._watch_accessed_variables) else: tape.push_tape(self._tape) self._recording = True def _pop_tape(self): if not self._recording: raise ValueError("Tape is not recording.") tape.pop_tape(self._tape) self._recording = False def __del__(self): if self._created_eagerly: try: context.context().end_step() except AttributeError: pass except TypeError: pass def watch(self, tensor): """Ensures that `tensor` is being traced by this tape. Args: tensor: a Tensor or list of Tensors. Raises: ValueError: if it encounters something that is not a tensor. """ for t in nest.flatten(tensor): if not (pywrap_tensorflow.IsTensor(t) or pywrap_tensorflow.IsVariable(t)): raise ValueError("Passed in object of type {}, not tf.Tensor".format( type(t))) if not t.dtype.is_floating: logging.log_first_n( logging.WARN, "The dtype of the watched tensor must be " "floating (e.g. tf.float32), got %r", 5, t.dtype) if hasattr(t, "handle"): # There are many variable-like objects, all of them currently have # `handle` attribute that points to a tensor. If this changes, internals # of watch_variable need to change as well. tape.watch_variable(self._tape, t) else: tape.watch(self._tape, t) @tf_contextlib.contextmanager def stop_recording(self): """Temporarily stops recording operations on this tape. Operations executed while this context manager is active will not be recorded on the tape. This is useful for reducing the memory used by tracing all computations. For example: ``` with tf.GradientTape(persistent=True) as t: loss = compute_loss(model) with t.stop_recording(): # The gradient computation below is not traced, saving memory. grads = t.gradient(loss, model.variables) ``` Yields: None Raises: RuntimeError: if the tape is not currently recording. """ if self._tape is None: raise RuntimeError( "Trying to stop recording a tape which is not recording.") self._pop_tape() try: yield finally: self._push_tape() def reset(self): """Clears all information stored in this tape. Equivalent to exiting and reentering the tape context manager with a new tape. For example, the two following code blocks are equivalent: ``` with tf.GradientTape() as t: loss = loss_fn() with tf.GradientTape() as t: loss += other_loss_fn() t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn # The following is equivalent to the above with tf.GradientTape() as t: loss = loss_fn() t.reset() loss += other_loss_fn() t.gradient(loss, ...) # Only differentiates other_loss_fn, not loss_fn ``` This is useful if you don't want to exit the context manager for the tape, or can't because the desired reset point is inside a control flow construct: ``` with tf.GradientTape() as t: loss = ... 
if loss > k: t.reset() ``` """ self._pop_tape() self._tape = None self._push_tape() def watched_variables(self): """Returns variables watched by this tape in order of construction.""" return self._tape.watched_variables() def gradient(self, target, sources, output_gradients=None, unconnected_gradients=UnconnectedGradients.NONE): """Computes the gradient using operations recorded in context of this tape. Args: target: a list or nested structure of Tensors or Variables to be differentiated. sources: a list or nested structure of Tensors or Variables. `target` will be differentiated against elements in `sources`. output_gradients: a list of gradients, one for each element of target. Defaults to None. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. Returns: a list or nested structure of Tensors (or IndexedSlices, or None), one for each element in `sources`. Returned structure is the same as the structure of `sources`. Raises: RuntimeError: if called inside the context of the tape, or if called more than once on a non-persistent tape. ValueError: if the target is a variable or if unconnected gradients is called with an unknown value. """ if self._tape is None: raise RuntimeError("GradientTape.gradient can only be called once on " "non-persistent tapes.") if self._recording: if not self._persistent: self._pop_tape() else: logging.log_first_n( logging.WARN, "Calling GradientTape.gradient on a persistent " "tape inside its context is significantly less " "efficient than calling it outside the context (it " "causes the gradient ops to be recorded on the " "tape, leading to increased CPU and memory usage). " "Only call GradientTape.gradient inside the " "context if you actually want to trace the " "gradient in order to compute higher order " "derivatives.", 1) flat_targets = [] for t in nest.flatten(target): if not t.dtype.is_floating: logging.vlog( logging.WARN, "The dtype of the target tensor must be " "floating (e.g. tf.float32) when calling GradientTape.gradient, " "got %r", t.dtype) if resource_variable_ops.is_resource_variable(t): with self: t = ops.convert_to_tensor(t) flat_targets.append(t) flat_sources = nest.flatten(sources) flat_sources_raw = flat_sources flat_sources = [_handle_or_self(x) for x in flat_sources] for t in flat_sources_raw: if not t.dtype.is_floating: logging.vlog( logging.WARN, "The dtype of the source tensor must be " "floating (e.g. tf.float32) when calling GradientTape.gradient, " "got %r", t.dtype) if output_gradients is not None: output_gradients = [None if x is None else ops.convert_to_tensor(x) for x in nest.flatten(output_gradients)] flat_grad = imperative_grad.imperative_grad( self._tape, flat_targets, flat_sources, output_gradients=output_gradients, sources_raw=flat_sources_raw, unconnected_gradients=unconnected_gradients) if not self._persistent: self._tape = None grad = nest.pack_sequence_as(sources, flat_grad) return grad def jacobian(self, target, sources, unconnected_gradients=UnconnectedGradients.NONE, parallel_iterations=None, experimental_use_pfor=True): """Computes the jacobian using operations recorded in context of this tape. See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the definition of a Jacobian. 
Example usage: ```python with tf.GradientTape() as g: x = tf.constant([1.0, 2.0]) g.watch(x) y = x * x jacobian = g.jacobian(y, x) # jacobian value is [[2., 0.], [0., 4.]] ``` Args: target: Tensor to be differentiated. sources: a list or nested structure of Tensors or Variables. `target` will be differentiated against elements in `sources`. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. parallel_iterations: A knob to control how many iterations are dispatched in parallel. This knob can be used to control the total memory usage. experimental_use_pfor: If true, vectorizes the jacobian computation. Else falls back to a sequential while_loop. Vectorization can sometimes fail or lead to excessive memory usage. This option can be used to disable vectorization in such cases. Returns: A list or nested structure of Tensors (or None), one for each element in `sources`. Returned structure is the same as the structure of `sources`. Note if any gradient is sparse (IndexedSlices), jacobian function currently makes it dense and returns a Tensor instead. This may change in the future. Raises: RuntimeError: If called on a non-persistent tape with eager execution enabled and without enabling experimental_use_pfor. ValueError: If vectorization of jacobian computation fails. """ flat_sources = nest.flatten(sources) target_static_shape = target.shape target_shape = array_ops.shape(target) # Note that we push and pop the tape here and below. This is needed since we # need gradients through the enclosed operations. self._push_tape() target = array_ops.reshape(target, [-1]) self._pop_tape() def loop_fn(i): self._push_tape() y = array_ops.gather(target, i) self._pop_tape() return self.gradient(y, flat_sources, unconnected_gradients=unconnected_gradients) try: target_size = int(target.shape[0]) except TypeError: target_size = array_ops.shape(target)[0] if experimental_use_pfor: try: output = pfor_ops.pfor(loop_fn, target_size, parallel_iterations=parallel_iterations) except ValueError as err: six.reraise( ValueError, ValueError( str(err) + "\nEncountered an exception while vectorizing the " "jacobian computation. Vectorization can be disabled by setting" " experimental_use_pfor to False."), sys.exc_info()[2]) else: if context.executing_eagerly() and not self._persistent: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the jacobian with eager execution enabled and with " " experimental_use_pfor set to False.") output = pfor_ops.for_loop( loop_fn, [target.dtype] * len(flat_sources), target_size, parallel_iterations=parallel_iterations) for i, out in enumerate(output): if out is not None: new_shape = array_ops.concat( [target_shape, array_ops.shape(out)[1:]], axis=0) out = array_ops.reshape(out, new_shape) if context.executing_eagerly(): out.set_shape(target_static_shape.concatenate(flat_sources[i].shape)) output[i] = out return nest.pack_sequence_as(sources, output) def batch_jacobian(self, target, source, unconnected_gradients=UnconnectedGradients.NONE, parallel_iterations=None, experimental_use_pfor=True): """Computes and stacks per-example jacobians. See [wikipedia article](http://en.wikipedia.org/wiki/jacobian_matrix_and_determinant) for the definition of a Jacobian. 
This function is essentially an efficient implementation of the following: `tf.stack([self.jacobian(y[i], x[i]) for i in range(x.shape[0])])`. Note that compared to `GradientTape.jacobian` which computes gradient of each output value w.r.t each input value, this function is useful when `target[i,...]` is independent of `source[j,...]` for `j != i`. This assumption allows more efficient computation as compared to `GradientTape.jacobian`. The output, as well as intermediate activations, are lower dimensional and avoid a bunch of redundant zeros which would result in the jacobian computation given the independence assumption. Example usage: ```python with tf.GradientTape() as g: x = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32) g.watch(x) y = x * x batch_jacobian = g.batch_jacobian(y, x) # batch_jacobian is [[[2, 0], [0, 4]], [[6, 0], [0, 8]]] ``` Args: target: A tensor with rank 2 or higher and with shape [b, y1, ..., y_n]. `target[i,...]` should only depend on `source[i,...]`. source: A tensor with rank 2 or higher and with shape [b, x1, ..., x_m]. unconnected_gradients: a value which can either hold 'none' or 'zero' and alters the value which will be returned if the target and sources are unconnected. The possible values and effects are detailed in 'UnconnectedGradients' and it defaults to 'none'. parallel_iterations: A knob to control how many iterations are dispatched in parallel. This knob can be used to control the total memory usage. experimental_use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop. Returns: A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]` is the jacobian of `target[i, ...]` w.r.t. `source[i, ...]`, i.e. stacked per-example jacobians. Raises: RuntimeError: If called on a non-persistent tape with eager execution enabled and without enabling experimental_use_pfor. ValueError: If vectorization of jacobian computation fails or if first dimension of `target` and `source` do not match. """ target_shape = target.shape if target_shape.rank is None: dim = tensor_shape.Dimension(None) else: dim = target_shape.dims[0] if not (target_shape.with_rank_at_least(2) and source.shape.with_rank_at_least(2) and dim.is_compatible_with(source.shape[0])): raise ValueError( "Need first dimension of target shape (%s) and " "source shape (%s) to match." % (target.shape, source.shape)) if target_shape.is_fully_defined(): batch_size = int(target_shape[0]) target_row_size = target_shape.num_elements() // batch_size else: target_shape = array_ops.shape(target) batch_size = target_shape[0] target_row_size = array_ops.size(target) // batch_size source_shape = array_ops.shape(source) # Flatten target to 2-D. # Note that we push and pop the tape here and below. This is needed since we # need gradients through the enclosed operations. self._push_tape() with ops.control_dependencies( [check_ops.assert_equal(batch_size, source_shape[0])]): target = array_ops.reshape(target, [batch_size, target_row_size]) self._pop_tape() def loop_fn(i): self._push_tape() y = array_ops.gather(target, i, axis=1) self._pop_tape() return self.gradient(y, source, unconnected_gradients=unconnected_gradients) if experimental_use_pfor: try: output = pfor_ops.pfor(loop_fn, target_row_size, parallel_iterations=parallel_iterations) except ValueError as err: six.reraise( ValueError, ValueError( str(err) + "\nEncountered an exception while vectorizing the " "batch_jacobian computation. 
Vectorization can be disabled by " "setting experimental_use_pfor to False."), sys.exc_info()[2]) else: if context.executing_eagerly() and not self._persistent: raise RuntimeError( "GradientTape must be created with persistent=True" " to compute the batch_jacobian with eager execution enabled and " " with experimental_use_pfor set to False.") output = pfor_ops.for_loop(loop_fn, target.dtype, target_row_size, parallel_iterations=parallel_iterations) new_shape = array_ops.concat([target_shape, source_shape[1:]], axis=0) if output is None: return array_ops.zeros(new_shape) else: output = array_ops.reshape(output, [target_row_size, batch_size, -1]) output = array_ops.transpose(output, [1, 0, 2]) return array_ops.reshape(output, new_shape)
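# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): exercising the jacobian
# and batch_jacobian methods defined above, assuming TF 1.15 with eager
# execution enabled. Values mirror the docstring examples.
import tensorflow as tf

tf.compat.v1.enable_eager_execution()

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# persistent=True because two gradient computations are requested below.
with tf.GradientTape(persistent=True) as g:
  g.watch(x)
  y = x * x

print(g.jacobian(y, x).shape)  # (2, 2, 2, 2): d y[i, j] / d x[k, l]
print(g.batch_jacobian(y, x))  # [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]
del g  # drop the reference so tape resources are released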
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/backprop.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=unidiomatic-typecheck """Defun decorator for defining graph-mode functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import enum # pylint: disable=g-bad-import-order import functools import itertools import threading import types as types_lib import weakref import numpy as np import six from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import context from tensorflow.python.eager import execute from tensorflow.python.eager import tape from tensorflow.python.eager.graph_only_ops import graph_placeholder from tensorflow.python.framework import c_api_util from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import constant_op from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import error_interpolation from tensorflow.python.framework import errors from tensorflow.python.framework import func_graph as func_graph_module from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.ops import array_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import default_gradient from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gradients_util from tensorflow.python.ops import resource_variable_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import function_utils from tensorflow.python.util import lazy_loader from tensorflow.python.util import memory from tensorflow.python.util import nest from tensorflow.python.util import object_identity from tensorflow.python.util import tf_decorator from tensorflow.python.util import tf_inspect # Loaded lazily due to a circular dependency (roughly # tf.function->autograph->dataset->tf.function). # TODO(b/133251390): Use a regular import.
ag_ctx = lazy_loader.LazyLoader( "ag_ctx", globals(), "tensorflow.python.autograph.core.ag_ctx") FORWARD_FUNCTION_ATTRIBUTE_NAME = "forward_function_name" BACKWARD_FUNCTION_ATTRIBUTE_NAME = "backward_function_name" class CacheKey( collections.namedtuple("CacheKey", [ "input_signature", "parent_graph", "device_functions", "colocation_stack", "in_cross_replica_context" ])): """Named tuple used to key the function cache.""" def __hash__(self): """Provide a hash even if the input signature objects aren't hashable.""" return hash(self._fields_safe) @property def _fields_safe(self): """Hash & equality-safe version of all the namedtuple fields.""" return (self._hash_fix(self.input_signature), self.parent_graph, self.device_functions, self.colocation_stack, self.in_cross_replica_context) def _hash_fix(self, elem): """Ensure elem is hashable even if a Variable is nested in it.""" # Descend into tuples if isinstance(elem, tuple): return tuple(self._hash_fix(i) for i in elem) if isinstance(elem, set): return {self._hash_fix(i) for i in elem} # If the element is not hashable, assume it is a weakref to a variable and # return the dtype & shape. Else, simply return the element try: hash(elem) except TypeError: v = elem() return (v.__class__, tensor_spec.TensorSpec(v.shape, v.dtype)) return elem def __eq__(self, other): return self._fields_safe == other._fields_safe # pylint: disable=protected-access CacheKey.replace = CacheKey._replace # pylint: disable=protected-access def _flat_shape_list(*params): """Return a flat list of TensorShapes, one for each tensor[spec] in `*params`. If `params` contains `CompositeTensors`, then they are expanded to their components `Tensors`. Args: *params: Set of nested entries containing Tensors, TensorSpec, and non-tensors. Returns: A list of entries containing either `None` or `TensorShape`. """ return [tensor_shape.TensorShape(x.shape) if isinstance(x, (ops.Tensor, tensor_spec.TensorSpec)) else None for x in nest.flatten(params, expand_composites=True)] def _shape_less_specific_than(relaxed, to_check): """Checks if `relaxed` is less specific than `to_check`. This is an asymmetric check, unlike `TensorShape.is_compatible_with`. If `to_check` has a dimension with an undefined shape, `relaxed` must also have an undefined shape for that dimension. Args: relaxed: A `TensorShape` to check against. to_check: A second `TensorShape`. Returns: True if `to_check` represents a set of shapes which is a subset of `relaxed`'s shapes and False otherwise. """ if to_check.dims is not None and relaxed.dims is not None: if to_check.rank != relaxed.rank: return False for check_dim, relaxed_dim in zip(to_check.dims, relaxed.dims): if check_dim.value is None and relaxed_dim.value is not None: return False if not relaxed_dim.is_compatible_with(check_dim): return False return True def _compatible_shapes(flat_relaxed, flat_to_check): """Check if lists of TensorShapes contain compatible shapes. Checks that each `flat_relaxed` shape covers a superset of the shapes of the corresponding `flat_to_check` shape. Args: flat_relaxed: List of TensorShape or None. flat_to_check: List of TensorShape or None. Returns: A python bool. Raises: RuntimeError: if `len(flat_relaxed) != len(flat_to_check)`. RuntimeError: if `flat_relaxed[i] is None != flat_to_check[i] is None` for any `i`. 
""" if len(flat_relaxed) != len(flat_to_check): raise RuntimeError("Expected shape lists of identical lengths, but saw: " "%s and %s" % (flat_relaxed, flat_to_check)) def is_compatible(relaxed, to_check): """Internal help function. Args: relaxed: TensorShape or None. to_check: TensorShape or None. Returns: Python bool. Raises: RuntimeError: If `relaxed is None != to_check is None`. """ # If both x and y are None, there is no shape to compare. Otherwise check # if they are compatible with each other. Either way, both input signatures # must have have Tensors in the same entries. If not, raise an assertion # error. if relaxed is None != to_check is None: raise RuntimeError( "Expected signature type matches between flattened input shapes " "%s and %s; but saw that (%s is None) != (%s is None)" % (flat_relaxed, flat_to_check, relaxed, to_check)) return relaxed is None or _shape_less_specific_than(relaxed, to_check) return all(is_compatible(relaxed, to_check) for relaxed, to_check in zip(flat_relaxed, flat_to_check)) def common_shape(x, y): """Find a `TensorShape` that is compatible with both `x` and `y`.""" if x is None != y is None: raise RuntimeError( "Cannot find a common shape when LHS shape is None but RHS shape " "is not (or vice versa): %s vs. %s" % (x, y)) if x is None: return None # The associated input was not a Tensor, no shape generated. if not isinstance(x, tensor_shape.TensorShape): raise TypeError("Expected x to be a TensorShape but saw %s" % (x,)) if not isinstance(y, tensor_shape.TensorShape): raise TypeError("Expected y to be a TensorShape but saw %s" % (y,)) if x.rank != y.rank or x.rank is None: return tensor_shape.TensorShape(None) dims = [] for dim_x, dim_y in zip(x.dims, y.dims): if (dim_x != dim_y or tensor_shape.dimension_value(dim_x) is None or tensor_shape.dimension_value(dim_y) is None): dims.append(None) else: dims.append(tensor_shape.dimension_value(dim_x)) return tensor_shape.TensorShape(dims) def is_same_structure(structure1, structure2, check_values=False): """Check two structures for equality, optionally of types and of values.""" try: nest.assert_same_structure(structure1, structure2, expand_composites=True) except (ValueError, TypeError): return False if check_values: flattened1 = nest.flatten(structure1, expand_composites=True) flattened2 = nest.flatten(structure2, expand_composites=True) # First check the types to avoid AttributeErrors. if any(type(f1) != type(f2) for f1, f2 in zip(flattened1, flattened2)): return False return flattened1 == flattened2 return True def _parse_func_attrs(attributes): """Convert the keyword arguments into function_def attributes. Currently only support primitive types: bool, int, float and string. Args: attributes: the dictionary of attributes. Returns: A dict of attributes where the key is the name of attribute and the value is the AttrValue proto. Raises: ValueError: If the kwargs contains unwhitelisted name or unsupported value types. """ attrs = {} for key, value in attributes.items(): if isinstance(value, attr_value_pb2.AttrValue): attrs[key] = value # bool type check has to happen before int since bool is a subclass of int. 
elif isinstance(value, bool): attrs[key] = attr_value_pb2.AttrValue(b=value) elif isinstance(value, int): attrs[key] = attr_value_pb2.AttrValue(i=value) elif isinstance(value, float): attrs[key] = attr_value_pb2.AttrValue(f=value) elif isinstance(value, (str, bytes, six.text_type)): attrs[key] = attr_value_pb2.AttrValue(s=compat.as_bytes(value)) else: raise ValueError("Unsupported attribute type for %s with type %s" % (key, type(value))) return attrs class _InterpolateFunctionError(object): """Context Manager that interpolates the exception from 'top_level_func'.""" def __init__(self, top_level_func): self._func = top_level_func def __enter__(self): pass def __exit__(self, typ, exc, tb): if not exc or not isinstance(exc, errors.OpError): return False message = compat.as_text(exc.message) _, tags = error_interpolation.parse_message(message) g = None func_stack = [] for t in tags: if t.type == "function_node": # TODO(mdan): Tests should cover this. if t.name == compat.as_str(self._func.name): g = self._func.graph elif g: next_func = g._get_function(t.name) if next_func is not None and isinstance(next_func, _EagerDefinedFunction): g = next_func.graph if g: func_stack.append(g.name) else: func_stack.append("<unknown>") if g: message = error_interpolation.interpolate(message, g) message += "\n\nFunction call stack:\n" message += " -> ".join(func_stack) message += "\n" exc._message = message # pylint: disable=protected-access return False def _forward_name(n): """The name of a generated forward defun named n.""" return "__forward_%s_%s" % (n, ops.uid()) def _backward_name(n): """The name of a generated backward defun named n.""" return "__backward_%s_%s" % (n, ops.uid()) def _inference_name(n): """The name of a forward-but-no-gradient defun named n.""" return "__inference_%s_%s" % (n, ops.uid()) class _EagerDefinedFunctionDeleter(object): """Unregister function from eager context.""" def __init__(self, name): self.name = name def __del__(self): try: context.remove_function(self.name) except TypeError: # Suppress some exceptions, mainly for the case when we're running on # module deletion. Things that can go wrong include the context module # already being unloaded, self._handle._handle_data no longer being # valid, and so on. Printing warnings in these cases is silly # (exceptions raised from __del__ are printed as warnings to stderr). pass # 'NoneType' object is not callable when the handle has been # partially unloaded. except AttributeError: pass # 'NoneType' object has no attribute 'eager_mode' when context has # been unloaded. Will catch other module unloads as well. # TODO(apassos) get rid of this by splitting framework.function._DefinedFunction # so it doesn't have the definition-generating logic and is just a container for # an already-defined function. class _EagerDefinedFunction(object): """Callable with the interface of `framework.function._DefinedFunction`. `_EagerDefinedFunction` encapsulates a function definition and its properties, and it provides a method for calling the encapsulated function. Some Ops take functions as attributes, which have type `func`; an instance of this class may be provided as the value of these `func` attributes. """ def __init__(self, name, graph, inputs, outputs, attrs): """Initializes an eager defined function. Args: name: str, the name for the created function. 
graph: Graph, the graph containing the operations in the function. inputs: the tensors in the graph to be used as inputs to the function. outputs: the tensors in the graph which will be outputs of the function. attrs: dict mapping names of attributes to their AttrValue values. """ input_ops = set(arg.op for arg in inputs) operations = [op for op in graph.get_operations() if op not in input_ops] graph_output_names = graph._output_names # pylint: disable=protected-access if (graph_output_names is not None and all(ops.tensor_id(t) in graph_output_names for t in outputs)): output_names = [ compat.as_bytes(graph_output_names[ops.tensor_id(t)]) for t in outputs ] if len(set(output_names)) != len(output_names): # There are duplicate names for some reason, probably an invalid # signature. Revert to auto-naming. output_names = [] else: output_names = [] fn = pywrap_tensorflow.TF_GraphToFunction_wrapper( graph._c_graph, # pylint: disable=protected-access compat.as_str(name), False, [o._c_op for o in operations], # pylint: disable=protected-access [t._as_tf_output() for t in inputs], # pylint: disable=protected-access [t._as_tf_output() for t in outputs], # pylint: disable=protected-access output_names, [o._c_op for o in graph.control_outputs], # pylint: disable=protected-access [], # control_output_names None, compat.as_str("")) for name, attr_value in attrs.items(): serialized = attr_value.SerializeToString() # TODO(iga): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use status. pywrap_tensorflow.TF_FunctionSetAttrValueProto( fn, compat.as_str(name), serialized) # TODO(apassos) avoid creating a FunctionDef (especially to grab the # signature, but also in general it's nice not to depend on it). with c_api_util.tf_buffer() as buffer_: pywrap_tensorflow.TF_FunctionToFunctionDef(fn, buffer_) proto_data = pywrap_tensorflow.TF_GetBuffer(buffer_) function_def = function_pb2.FunctionDef() function_def.ParseFromString(compat.as_bytes(proto_data)) self.name = compat.as_bytes(function_def.signature.name) with ops.init_scope(): if context.executing_eagerly(): context.ensure_initialized() context.add_function(fn) self._function_deleter = _EagerDefinedFunctionDeleter(self.name) self._registered_on_context = True self.definition = function_def self.signature = function_def.signature self._num_outputs = len(self.signature.output_arg) self._output_types = [o.type for o in self.signature.output_arg] self._output_shapes = [o.shape for o in outputs] self._control_captures = graph.control_captures # Shallow copy outputs since ConcreteFunction may mutate it. self._func_graph_outputs = list(outputs) self.grad_func_name = None self.python_grad_func = None self._c_func = c_api_util.ScopedTFFunction(fn) self._grad_func = None self.graph = graph self._stateful_ops = tuple(op for op in operations if op._is_stateful) # pylint: disable=protected-access def add_to_graph(self, g=None): # pylint: disable=protected-access if not g and context.executing_eagerly(): context.context().add_function_def(self.definition) else: if self.name not in g._functions: g._add_function(self) for f in self.graph._functions.values(): if f.name not in g._functions: g._add_function(f) # pylint: enable=protected-access @property def stateful_ops(self): return self._stateful_ops def call(self, ctx, args, cancellation_manager=None): """Calls this function with `args` as inputs. `ConcreteFunction` execution respects device annotations only if the function won't be compiled with xla.
Args: ctx: a Context object args: a list of arguments to supply this function with. cancellation_manager: a `CancellationManager` object that can be used to cancel function execution. Returns: The outputs of the function call. Raises: ValueError: if the number of arguments is incorrect. """ if len(args) != len(self.signature.input_arg): raise ValueError( "Arguments and signature arguments do not match. " "got: %s, expected: %s " % (len(args), len(list(self.signature.input_arg)))) function_call_options = ctx.function_call_options if function_call_options.config_proto_serialized is None: config = function_utils.get_disabled_rewriter_config() else: config = function_call_options.config_proto_serialized executor_type = function_call_options.executor_type or "" executing_eagerly = ctx.executing_eagerly() if executing_eagerly: with _InterpolateFunctionError(self): if cancellation_manager is None: outputs = execute.execute( str(self.signature.name), num_outputs=self._num_outputs, inputs=args, attrs=("executor_type", executor_type, "config_proto", config), ctx=ctx) else: outputs = execute.execute_with_cancellation( str(self.signature.name), num_outputs=self._num_outputs, inputs=args, attrs=("executor_type", executor_type, "config_proto", config), ctx=ctx, cancellation_manager=cancellation_manager) # Replace empty list with None outputs = outputs or None else: # TODO(akshayka): Either remove this if the FunctionLibraryRuntime # creates `PartitionedCallOp` kernels by default, or remove the previous # branch if a TPU kernel is registered for `PartitionedCall`. with _InterpolateFunctionError(self): with ops.control_dependencies(self._control_captures): # The caller must use record_operation to record this operation in the # eager case, so we enforce the same requirement for the non-eager # case by explicitly pausing recording. We don't have a gradient # registered for PartitionedCall, so recording this operation confuses # forwardprop code (GradientTape manages to ignore it). with tape.stop_recording(): outputs = functional_ops.partitioned_call( args=args, f=self, tout=self._output_types, executing_eagerly=executing_eagerly, config=config, executor_type=executor_type) if executing_eagerly: return outputs else: # TODO(b/128924522): This additional set_shape should not be # necessary. ShapeRefiner likely needs to inspect handle_data. Remove this # once that's done. for i, shape in enumerate(self._output_shapes): outputs[i].set_shape(shape) for i, func_graph_output in enumerate(self._func_graph_outputs): custom_gradient.copy_handle_data(func_graph_output, outputs[i]) return outputs class _DelayedRewriteGradientFunctions(object): """Caches forward/backward functions with a delayed forward rewrite.""" def __init__(self, func_graph, attrs, func_graph_deleter): """Construct an inference function and initialize caches.""" # A map from the number of forward function outputs with accepted gradients # to forward and backward functions, used to cache non-tape backward # function generation. self._cached_function_pairs = {} self._func_graph = func_graph self._inference_function = _EagerDefinedFunction( _inference_name(self._func_graph.name), self._func_graph, self._func_graph.inputs, self._func_graph.outputs, attrs) self._attrs = attrs self._gradient_name = None # Note that the FuncGraph is mutated later, so we need to inspect it now to # figure out the user-specified outputs of the inference function. 
self._num_inference_outputs = len(self._func_graph.outputs) self._func_graph_deleter = func_graph_deleter def forward_backward(self, num_doutputs=None): """A possibly-cached pair of forward and backward functions.""" if num_doutputs is None: num_doutputs = self._num_inference_outputs forward_backward = self._cached_function_pairs.get(num_doutputs) if forward_backward is not None: return forward_backward forward, backward = self._construct_forward_backward(num_doutputs) self._cached_function_pairs[num_doutputs] = (forward, backward) return forward, backward def _construct_forward_backward(self, num_doutputs): """Constructs a pair of forward and backward functions. Args: num_doutputs: The constructed backprop function will take output gradients for the first `num_doutputs` outputs of the forward function. Defaults to the number of outputs for the inference function, but when higher-order gradients are computed this will increase to include side outputs. Returns: A pair of (forward_function, backward_function): forward_function: A re-generated inference function (an _EagerDefinedFunction) to account for new side outputs, if any extra were required when building the backward pass. backward_function: A ConcreteFunction that Takes `num_doutputs` arguments and returns gradients with respect to inputs of the forward function. """ trainable_outputs = [ output for output in self._func_graph.outputs[:num_doutputs] if gradients_util.IsTrainable(output)] signature = [] for t in trainable_outputs: signature.append( tensor_spec.TensorSpec(*default_gradient.shape_and_dtype(t))) def _backprop_function(*grad_ys): return gradients_util._GradientsHelper( # pylint: disable=protected-access trainable_outputs, self._func_graph.inputs, grad_ys=grad_ys, src_graph=self._func_graph) with self._func_graph.as_default(): backwards_graph = func_graph_module.FuncGraph( _backward_name(self._func_graph.name)) func_graph_module.func_graph_from_py_func( name=backwards_graph.name, python_func=_backprop_function, args=[], kwargs={}, signature=signature, func_graph=backwards_graph) backwards_graph_captures = backwards_graph.external_captures captures_from_forward = [ c for c in backwards_graph_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph] forward_function_name = _forward_name(self._func_graph.name) existing_outputs = object_identity.ObjectIdentitySet( self._func_graph.outputs) for capture in captures_from_forward: if capture not in existing_outputs: existing_outputs.add(capture) self._func_graph.outputs.append(capture) backward_function_attr = _parse_func_attrs( {FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name}) backward_function_attr.update(self._attrs) backward_function = ConcreteFunction( backwards_graph, attrs=backward_function_attr) forward_function_attr = _parse_func_attrs({ BACKWARD_FUNCTION_ATTRIBUTE_NAME: backward_function.name}) forward_function_attr.update(self._attrs) forward_function = _EagerDefinedFunction( forward_function_name, self._func_graph, self._func_graph.inputs, self._func_graph.outputs, forward_function_attr) return forward_function, backward_function def _rewrite_forward_and_call_backward(self, op, *doutputs): """Add outputs to the forward call and feed them to the grad function.""" forward_function, backwards_function = self.forward_backward(len(doutputs)) if not backwards_function.outputs: return [] forward_function.add_to_graph(op.graph) # pylint: disable=protected-access # Rewrite an inference call op to be a forward call op op._set_func_attr("f", 
forward_function.name) op._set_type_list_attr("Tout", forward_function._output_types) op._add_outputs( forward_function._output_types[len(op.outputs):], forward_function._output_shapes[len(op.outputs):]) for i in range(len(op.outputs)): func_graph_output = forward_function._func_graph_outputs[i] custom_gradient.copy_handle_data(func_graph_output, op.outputs[i]) # pylint: enable=protected-access capture_mapping = dict( zip([ops.tensor_id(t) for t in self._func_graph.outputs], op.outputs)) remapped_captures = [ capture_mapping.get(ops.tensor_id(capture), capture) for capture in backwards_function.captured_inputs ] # Replace Nones with zeros since we're calling a graph function which # expects numeric inputs. cleaned_doutputs = [] for doutput, placeholder in zip(doutputs, self._func_graph.outputs): if gradients_util.IsTrainable(placeholder): if doutput is not None: cleaned_doutputs.append(doutput) else: cleaned_doutputs.append(default_gradient.zeros_like(placeholder)) # Compute the gradients using the side outputs return backwards_function._call_flat( # pylint: disable=protected-access cleaned_doutputs, remapped_captures) def register(self): """Registers a delayed-rewrite gradient with a unique name (idempotent). The gradient rewrites an inference call op to a forward call op, but does not modify a pre-existing forward call op. It then computes the gradient from the output's gradients and the side outputs of the forward op. Returns: The name under which gradient was registered. """ if self._gradient_name: return self._gradient_name self._gradient_name = "PartitionedCall-%s" % ops.uid() @ops.RegisterGradient(self._gradient_name) def _registered_grad_fn(op, *doutputs): # pylint: disable=unused-variable return self._rewrite_forward_and_call_backward(op, *doutputs) return self._gradient_name @property def forward(self): """A forward function with only user-specified outputs. The call operation for the returned inference function can be rewritten into a forward function. This only happens if the backward function (from the `backward` method) ends up being used to compute gradients. This approach avoids constructing unnecessary graphs, but it only works if we are calling this function when not executing eagerly. Returns: An _EagerDefinedFunction. """ return self._inference_function def backward(self, outputs): """Fetch a backward function for `outputs` from the forward function.""" def _backward_function(*args): call_op = outputs[0].op return self._rewrite_forward_and_call_backward(call_op, *args) return _backward_function, outputs class _TapeGradientFunctions(object): """Caches forward and backward functions compatible with eager gradients. In contrast to the delayed-rewrite approach in `_DelayedRewriteGradientFunctions` which only works with delayed execution, the forward function generated by this class has a fixed set of outputs which may be preserved by a tape in order to compute gradients later. This class is abstract; its child classes differ in how many side outputs of the forward function their backward function accepts gradients for, which determines whether higher-order tape gradients are possible. 
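For example (a sketch; `concrete_fn` stands in for any `ConcreteFunction` and the shapes are illustrative), a single non-persistent `tf.GradientTape` records the fixed-output forward function and later replays the cached backward function:

  ```python
  x = tf.constant(2.0)
  with tf.GradientTape() as tape:
    tape.watch(x)
    y = concrete_fn(x)  # forward function (with side outputs) runs here
  dy_dx = tape.gradient(y, x)  # tape invokes the cached backward function
  ```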
""" def __init__(self, func_graph, attrs, func_graph_deleter): self._func_graph = func_graph self._attrs = attrs self._forward = None self._backward = None self._num_outputs = len(func_graph.outputs) self._func_graph_deleter = func_graph_deleter def _build_functions_for_outputs(self, outputs): """Forward+backward functions where the backward function sees `outputs`.""" # First figure out which of `outputs` are trainable. We'll accept gradients # for each of these in the backward function. handles_to_variables = self._func_graph.variable_captures trainable_outputs = [] for output in outputs: if gradients_util.IsTrainable(output): # Swap in the Variable object for resource handles if we can so # sparse gradients work. output = handles_to_variables.get(ops.tensor_id(output), output) trainable_outputs.append(output) backwards_graph = func_graph_module.FuncGraph( _backward_name(self._func_graph.name)) # Keep track of the forward graph so that if the backwards graph # tries to capture tensors those will be correctly captured first in # the forward graph. This is an edge case that can only happen with # tf.custom_gradient. backwards_graph._forward_func_graph = self._func_graph # pylint: disable=protected-access with backwards_graph.as_default(): gradients_wrt_outputs = [] for output in trainable_outputs: gradient_shape, gradient_dtype = default_gradient.shape_and_dtype( output) gradients_wrt_outputs.append( graph_placeholder(gradient_dtype, gradient_shape)) gradients_wrt_inputs = gradients_util._GradientsHelper( # pylint: disable=protected-access trainable_outputs, self._func_graph.inputs, grad_ys=gradients_wrt_outputs, src_graph=self._func_graph) captures_from_forward = [ c for c in backwards_graph.external_captures if not isinstance(c, ops.EagerTensor) and c.graph is self._func_graph ] existing_outputs = object_identity.ObjectIdentitySet( self._func_graph.outputs) for capture in captures_from_forward: if capture not in existing_outputs: existing_outputs.add(capture) self._func_graph.outputs.append(capture) forward_function_name = _forward_name(self._func_graph.name) backward_function_attr = _parse_func_attrs( {FORWARD_FUNCTION_ATTRIBUTE_NAME: forward_function_name}) backward_function_attr.update(self._attrs) # The ordering of `backwards_graph.inputs` is important: inputs of # `backward_function` correspond to outputs (including # side outputs) of `self._tape_forward_function`. backwards_graph.inputs = ( gradients_wrt_outputs + backwards_graph.internal_captures) backwards_graph.outputs.extend( grad for grad in nest.flatten(gradients_wrt_inputs, expand_composites=True) if grad is not None) backwards_graph.structured_outputs = gradients_wrt_inputs backward_function = ConcreteFunction( backwards_graph, attrs=backward_function_attr) forward_function_attr = _parse_func_attrs({ BACKWARD_FUNCTION_ATTRIBUTE_NAME: backward_function.name}) forward_function_attr.update(self._attrs) forward_function = _EagerDefinedFunction( forward_function_name, self._func_graph, self._func_graph.inputs, self._func_graph.outputs, forward_function_attr) return forward_function, backward_function @property def forward(self): """Construct or fetch a forward function with side-outputs. When graph building without a tape active, symbolic gradients rely on regenerating the backward function for higher-order gradients (to account for new side outputs of the rewritten forward function call). Thus there is no fixed backward function for this case. 
However, when a tape is active (eager or graph building), we generate fixed backward and forward functions at forward function call time. This difference between the tape and non-tape cases is to avoid building unneeded backward functions while graph building (where we may or may not eventually need gradients). Returns: A forward _EagerDefinedFunction. """ if self._forward is None: self._forward, self._backward = ( self._forward_and_backward_functions()) return self._forward def backward(self, outputs): """Create a backward function given `outputs` from the forward function.""" capture_mapping = dict( zip([ops.tensor_id(t) for t in self._func_graph.outputs], outputs)) remapped_captures = [ capture_mapping.get(ops.tensor_id(capture), capture) for capture in self._backward.captured_inputs ] # We may need to use zeros_like to get a zero for variant Tensors with # unconnected gradients. We do that in advance so we don't have to hold on # to the outputs themselves, which may not be needed otherwise. variant_zeros_like = {} backward_function_inputs = ( len(self._backward.inputs) - len(self._backward.captured_inputs)) recorded_outputs = [] trainable_recorded_outputs = 0 skip_positions = [] for output_index, output in enumerate(outputs): if trainable_recorded_outputs < backward_function_inputs: recorded_outputs.append(output) if gradients_util.IsTrainable(output): trainable_recorded_outputs += 1 else: skip_positions.append(output_index) if output.dtype == dtypes.variant: variant_zeros_like[output_index] = default_gradient.zeros_like(output) def _backward_function_wrapper(*args): """Process output gradients and call the backward function.""" if not self._backward.outputs: return [] processed_args = [] input_index = 0 for output_index, arg in enumerate(args): if output_index in skip_positions: continue if arg is None: # We're calling a (non-polymorphic) ConcreteFunction, so we need to # have a Tensor value for each Tensor we thought would be trainable # based on its dtype, even if it ended up being unconnected. input_placeholder = self._backward.inputs[ input_index] if input_placeholder.dtype == dtypes.variant: arg = variant_zeros_like[output_index] else: arg = array_ops.zeros( *default_gradient.shape_and_dtype(input_placeholder)) processed_args.append(arg) input_index += 1 if input_index >= backward_function_inputs: break return self._backward._call_flat( # pylint: disable=protected-access processed_args, remapped_captures) return _backward_function_wrapper, recorded_outputs class _FirstOrderTapeGradientFunctions(_TapeGradientFunctions): """Caches tape-friendly functions for first-order gradients.""" def __init__(self, func_graph, attrs, func_graph_deleter): super(_FirstOrderTapeGradientFunctions, self).__init__( func_graph, attrs, func_graph_deleter) self._num_inference_outputs = len(func_graph.outputs) self._func_graph_deleter = func_graph_deleter def _forward_and_backward_functions(self): """Shortcut for when only first-order gradients are required. The returned backward function does not accept gradients with respect to side output of forward_function. This is fine as long as the user can't possibly request second order tape gradients, as when they've used a single non-persistent GradientTape. Since we don't need the backward function to take gradients with respect to side outputs, we can skip some potentially slow graph building. 
Returns: A tuple of (forward_function, backward_function): forward_function: Takes the same inputs as the inference function, but returns side outputs used by backward_function in addition to the inference function's outputs. backward_function: Takes side outputs from forward_function and gradients with respect to the "real" outputs of forward_function and returns gradients with respect to the inputs. """ outputs = self._func_graph.outputs[:self._num_inference_outputs] return self._build_functions_for_outputs(outputs) class _HigherOrderTapeGradientFunctions(_TapeGradientFunctions): """Caches tape-friendly functions for higher-order gradients.""" # TODO(b/136189779): Cond/while under a tape may need similar logic. Consider # generalizing if so. def _forward_and_backward_functions(self): """Forward and backward functions suitable for higher-order gradients. Unlike in `_FirstOrderTapeGradientFunctions`, the backward function built by this method accepts gradients for all of the outputs of the returned forward function, including side outputs. Returns: A tuple of (forward_function, backward_function): forward_function: Takes the same inputs as the inference function, but returns side outputs used by backward_function in addition to the inference function's outputs. backward_function: Takes side outputs from forward_function and gradients with respect to all of its outputs, real and side. Returns gradients with respect to the inputs. """ outputs = [] # First we need to figure out how many side outputs from the forward pass # will be required. We do this in a temporary graph to avoid actually # running multiple copies of the backward pass (one per _GradientsHelper # call). # # While computing gradients, the backward function captures Tensors from # the forward function. We add these as side outputs of the original # function. However, we then need to accept output gradients with respect # to these side outputs for higher order gradients to work. Thus we loop # until the number of outputs of the function stabilizes. Note that this # is only required for tape gradients, where we need to declare in advance # all of the forward op's outputs: symbolic gradients with tf.gradients # instead rely on regenerating backward functions when higher-order # gradients are requested. while len(outputs) < len(self._func_graph.outputs): new_outputs = self._func_graph.outputs[len(outputs):] outputs = list(self._func_graph.outputs) self._build_functions_for_outputs(new_outputs) forward_function, backward_function = ( self._build_functions_for_outputs(outputs)) if len(self._func_graph.outputs) != len(outputs): raise AssertionError( ("Unexpectedly added new outputs to the forward function when " "building the backward function: {}").format( self._func_graph.outputs[len(outputs):])) return forward_function, backward_function class _PossibleTapeGradientTypes(enum.Enum): """Represents the output of TFE_Py_TapeSetPossibleGradientTypes.""" NONE = 0 FIRST_ORDER = 1 HIGHER_ORDER = 2 class ConcreteFunction(object): """Callable object encapsulating a function definition and its gradient. `ConcreteFunction` is a callable that encapsulates a function definition and is differentiable under `tf.GradientTape` objects. """ def __init__(self, func_graph, attrs=None, signature=None, shared_func_graph=True): """Initialize a `ConcreteFunction`. Args: func_graph: An instance of FuncGraph: the function body to wrap. attrs: (optional) dict mapping names of attributes to their AttrValue values. 
Attributes in `attrs` will be included in this function's definition. signature: a nested sequence of `TensorSpec` objects specifying the input signature of this function. shared_func_graph: If False, the ConcreteFunction takes ownership of `func_graph` and will break reference cycles when it is deleted. This makes the FuncGraph inoperable. Raises: ValueError: If number of input_placeholders is not equal to the number of function inputs. """ self._arg_keywords = None self._num_positional_args = None self._func_graph = func_graph self._captured_inputs = self._func_graph.external_captures self._captured_closures = self._func_graph.deferred_external_captures self._output_shapes = tuple( output.shape for output in self._func_graph.outputs) attrs = _parse_func_attrs(attrs or {}) self._signature = signature if shared_func_graph: self._garbage_collector = None else: self._garbage_collector = ConcreteFunctionGarbageCollector( func_graph) # Pairs of forward and backward functions used for computing gradients. # # These each get a reference to the FuncGraph deleter since they use the # FuncGraph directly. self._delayed_rewrite_functions = _DelayedRewriteGradientFunctions( func_graph, attrs, self._garbage_collector) self._first_order_tape_functions = _FirstOrderTapeGradientFunctions( func_graph, attrs, self._garbage_collector) self._higher_order_tape_functions = _HigherOrderTapeGradientFunctions( func_graph, attrs, self._garbage_collector) def __call__(self, *args, **kwargs): """Executes the wrapped function. Args: *args: Tensors or Variables. Positional arguments are only accepted when they correspond one-to-one with arguments of the traced Python function. **kwargs: Tensors or Variables specified by name. When `get_concrete_function` was called to create this `ConcreteFunction`, each Tensor input was given a name, defaulting to the name of the Python function's argument but possibly overridden by the `name=` argument to `tf.TensorSpec`. These names become the argument names for the concrete function. Returns: The result of applying the TF function on the given Tensors. Raises: AssertionError: If this `ConcreteFunction` was not created through `get_concrete_function`. ValueError: If arguments contains anything other than Tensors or Variables. TypeError: For invalid positional/keyword argument combinations. """ return self._call_impl(args, kwargs) def _call_impl(self, args, kwargs, cancellation_manager=None): """See `__call__` for details.""" if self._arg_keywords is None or self._num_positional_args is None: if self._signature is not None: if kwargs: raise NotImplementedError( "Keyword arguments not supported when calling a " "wrap_function-decorated function.") return self._call_flat(args, self.captured_inputs) raise AssertionError( "Tried to call a concrete function obtained from an internal API " "through the public interface. Use get_concrete_function instead.") if len(args) > self._num_positional_args: raise TypeError( ("Expected at most {} positional arguments (and the rest keywords, " "of {}), got {}. When calling a concrete function, positional " "arguments may not be bound to Tensors within nested structures." ).format(self._num_positional_args, self._arg_keywords, args)) args = list(args) for keyword in self._arg_keywords[len(args):]: try: args.append(kwargs.pop(compat.as_str(keyword))) except KeyError: specified_keywords = (list(self._arg_keywords[:len(args)]) + list(kwargs.keys())) raise TypeError( "Expected argument names {} but got values for {}. Missing: {}." 
.format( list(self._arg_keywords), specified_keywords, list(set(self._arg_keywords) - set(specified_keywords)))) if kwargs: positional_arg_keywords = set(self._arg_keywords[:len(args)]) for unused_key in kwargs: if unused_key in positional_arg_keywords: raise TypeError("Got two values for keyword '{}'.".format(unused_key)) raise TypeError("Keyword arguments {} unknown. Expected {}.".format( list(kwargs.keys()), list(self._arg_keywords))) return self._call_flat(args, self.captured_inputs, cancellation_manager) def _filtered_call(self, args, kwargs): """Executes the function, filtering arguments from the Python function. Objects aside from Tensors, CompositeTensors, and Variables are ignored. CompositeTensors are expanded into their components. Args: args: Canonicalized positional arguments of the Python function. kwargs: Canonicalized keyword arguments of the Python function. Returns: The result of applying the function on the Tensors/Variables contained in `args` and `kwargs`. """ return self._call_flat( (t for t in nest.flatten((args, kwargs), expand_composites=True) if isinstance(t, (ops.Tensor, resource_variable_ops.BaseResourceVariable))), self.captured_inputs) def _call_flat(self, args, captured_inputs, cancellation_manager=None): """Executes the wrapped function. Args: args: a list of Tensors or Variables. Any CompositeTensors should be expanded before calling this method. captured_inputs: the captured inputs that are also part of the input args to the actual execution. By default, it should be self._captured_inputs. cancellation_manager: (Optional.) A `CancellationManager` that can be used to cancel function invocation. Returns: The result of applying the TF function to `args`. Raises: ValueError: If `args` contains anything other than Tensors or Variables. """ args = list(args) ctx = context.context() executing_eagerly = ctx.executing_eagerly() # Copy saveable status of function's graph to current FuncGraph. default_graph = ops.get_default_graph() if default_graph.building_function and not self._func_graph.saveable: default_graph.mark_as_unsaveable(self._func_graph.saving_errors) if any(isinstance(a, composite_tensor.CompositeTensor) for a in args): raise AssertionError("Expected all args to be Tensors or Variables; " "but got CompositeTensor: %r" % args) if (tape.could_possibly_record() or hasattr(ops.get_default_graph(), "watch_variable")): for v in self._func_graph.variables: resource_variable_ops.variable_accessed(v) tensor_inputs = [] variables_used = object_identity.ObjectIdentitySet([]) for i, arg in enumerate(args): if isinstance(arg, resource_variable_ops.BaseResourceVariable): # We can pass a variable more than once, and in this case we need to # pass its handle only once. if arg.handle in variables_used: continue resource_variable_ops.variable_accessed(arg) tensor_inputs.append(arg.handle) variables_used.add(arg.handle) elif isinstance(arg, ops.Tensor): tensor_inputs.append(arg) if not executing_eagerly: # If we're graph building, shape inference is on. We check for input # compatibility up front to avoid hard to debug incompatibilities # later. graph_input_shape = tensor_shape.TensorShape( self._func_graph.inputs[i].shape) if not graph_input_shape.is_compatible_with(arg.shape): if self._arg_keywords: arg_name = "'{}'".format(self._arg_keywords[i]) else: arg_name = "with index {}".format(i) raise ValueError( ("The argument {} (value {}) is not compatible with the shape " "this function was traced with. 
Expected shape {}, but got " "shape {}.\n\nIf you called get_concrete_function, you may " "need to pass a tf.TensorSpec(..., shape=...) with a less " "specific shape, having None on axes which can vary.").format( arg_name, arg, self._func_graph.inputs[i].shape, arg.shape)) elif (self._signature is not None and isinstance(self._signature[i], tensor_spec.TensorSpec)): tensor_inputs.append( ops.convert_to_tensor(arg, self._signature[i].dtype)) else: raise ValueError("All inputs to `ConcreteFunction`s must be Tensors; " "on invocation of %s, the %d-th input (%s) was not a " "Tensor." % (self._func_graph.name, i, str(arg))) args = tensor_inputs + captured_inputs forward_backward = self._select_forward_and_backward_functions(args) forward_function = forward_backward.forward if executing_eagerly: flat_outputs = forward_function.call( ctx, args, cancellation_manager=cancellation_manager) else: gradient_name = self._delayed_rewrite_functions.register() with ops.get_default_graph().gradient_override_map( {"PartitionedCall": gradient_name, "StatefulPartitionedCall": gradient_name}): flat_outputs = forward_function.call(ctx, args) if isinstance(flat_outputs, ops.Operation) or flat_outputs is None: # We only record function calls which have outputs. return self._build_call_outputs(flat_outputs) backward_function, to_record = forward_backward.backward(flat_outputs) tape.record_operation(forward_function.signature.name, to_record, args, backward_function) return self._build_call_outputs(flat_outputs) def _experimental_with_cancellation_manager(self, cancellation_manager): """Returns a callable that invokes a cancelable version of this function. Args: cancellation_manager: A `CancellationManager` object that can be used to cancel function invocation. Returns: A callable with the same signature as this concrete function. """ def cancellable_call(*args, **kwargs): return self._call_impl( args, kwargs, cancellation_manager=cancellation_manager) return cancellable_call @property def name(self): """`ConcreteFunction` name.""" return self._delayed_rewrite_functions.forward.name @property def graph(self): """Returns the graph from which this function was constructed.""" return self._func_graph @property def inputs(self): """Returns tensors in `self.graph` corresponding to arguments.""" return self._func_graph.inputs @property def structured_input_signature(self): """Returns structured signature of the original function.""" return self._func_graph.structured_input_signature @property def outputs(self): """Returns tensors in `self.graph` corresponding to returned tensors.""" return self._func_graph.outputs @property def structured_outputs(self): """Returns outputs in `self.graph` as returned by the original function.""" return self._func_graph.structured_outputs @property def captured_inputs(self): """Returns external Tensors captured by this function. self.__call__(*args) passes `args + self.captured_inputs` to the function. 
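For example (a sketch written against the public `tf.function` API; `v` and `f` are hypothetical):

  ```python
  v = tf.Variable(1.0)

  @tf.function
  def f(x):
    return x + v  # `v` enters through a capture, not an explicit argument

  cf = f.get_concrete_function(tf.TensorSpec([], tf.float32))
  cf(tf.constant(2.0))  # executes with inputs [2.0] + cf.captured_inputs
  ```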
""" from_closures = nest.flatten([x() for x in self._captured_closures], expand_composites=True) return self._captured_inputs + from_closures @property def function_def(self): """Returns a `FunctionDef` object representing this function.""" return self._delayed_rewrite_functions.forward.definition @property def output_shapes(self): """The function's output shapes.""" return nest.map_structure( lambda x: getattr(x, "shape", tensor_shape.TensorShape(None)), composite_tensor.replace_composites_with_components( self._func_graph.structured_outputs), expand_composites=False) @property def output_dtypes(self): # TODO(akshayka): Consider removing this. return nest.map_structure( lambda x: x.dtype if x is not None else None, composite_tensor.replace_composites_with_components( self._func_graph.structured_outputs), expand_composites=False) def add_to_graph(self, g=None): """Registers the function, adds it to the graph g or default graph. Args: g: If specified, registers the function with this graph. Defaults to the current context (either the default graph or the eager context). """ # If we are not executing eagerly, adds the function to default graph if no # graph is specified. # In case of eager execution, function definition gets added to context # during construction itself. if not context.executing_eagerly() and not g: g = ops.get_default_graph() self._delayed_rewrite_functions.forward.add_to_graph(g) def add_gradient_functions_to_graph(self, g=None): """Add forward/backward functions to graph `g` or the current context.""" if not context.executing_eagerly() and not g: g = ops.get_default_graph() self._delayed_rewrite_functions.forward.add_to_graph(g) forward_function, backward_function = ( self._delayed_rewrite_functions.forward_backward()) forward_function.add_to_graph(g) backward_function.add_to_graph(g) def _register_delayed_rewrite_gradient(self): """Registers a delayed-rewrite gradient function and returns the name.""" return self._delayed_rewrite_functions.register() def _select_forward_and_backward_functions(self, args): """Selects forward and backward functions based on the calling context. The forward function computes the "real" function outputs, `self._outputs`, and any extra values needed by the corresponding backward function. Args: args: A flat list of Tensors with all of the inputs to the forward function (including user-specified and captured inputs). Returns: An object with a `forward` property containing an _EagerDefinedFunction, and a corresponding `backward` method which takes outputs from the forward function and returns a backward function. """ possible_gradient_type = _PossibleTapeGradientTypes( pywrap_tensorflow.TFE_Py_TapeSetPossibleGradientTypes(args)) if possible_gradient_type == _PossibleTapeGradientTypes.FIRST_ORDER: if context.executing_eagerly(): # There is a single non-persistent tape active, so the user can only # request first-order gradients from a tape. We can spend less time # graph building since we know this. # # We may still end up computing higher-order gradients, but that'd be # through `tf.gradients`, which can re-write the forward pass and so # needs no preparation here. return self._first_order_tape_functions else: # We can avoid computing second-order gradients in some cases by doing a # delayed rewrite when graph building. Since we know we'll only compute # first-order tape gradients, the delayed rewrite is safe: we won't need # to tell the tape about side outputs. # # TODO(allenl): This case is really dirty. 
It would be better if we # could temporarily pop all of the current tapes to avoid # accidentally taking second-order gradients. return self._delayed_rewrite_functions elif possible_gradient_type == _PossibleTapeGradientTypes.HIGHER_ORDER: # Either there's a persistent tape watching, or there are multiple nested # tapes. Either way, the user may request higher-order gradients. We'll # spend a bit more time and make sure higher-order gradients are correct. return self._higher_order_tape_functions # else possible_gradient_type == _PossibleTapeGradientTypes.NONE, meaning no # tape is recording. return self._delayed_rewrite_functions def _build_call_outputs(self, result): """Maps the fdef output list to actual output structure. Args: result: Output lists defined by FunctionDef. Returns: The actual call output. """ if self._func_graph.structured_outputs is None: return result # Replace outputs with results, skipping over any 'None' values. outputs_list = nest.flatten(self._func_graph.structured_outputs, expand_composites=True) j = 0 for i, o in enumerate(outputs_list): if o is not None: outputs_list[i] = result[j] j += 1 ret = nest.pack_sequence_as(self._func_graph.structured_outputs, outputs_list, expand_composites=True) return ret pywrap_tensorflow.RegisterType("Tensor", ops.Tensor) pywrap_tensorflow.RegisterType("IndexedSlices", ops.IndexedSlices) def _deterministic_dict_values(dictionary): return tuple(dictionary[key] for key in sorted(dictionary)) class FunctionSpec(object): """Specification of how to bind arguments to a function.""" @staticmethod def from_function_and_signature(python_function, input_signature): """Create a FunctionSpec instance given a python function and signature.""" fullargspec = tf_inspect.getfullargspec(python_function) # Treat a wrapped partial function as a special case. For all arguments that # were overridden with keywords in the partial: # - remove the corresponding arguments, # - remove the corresponding keywords. _, unwrapped = tf_decorator.unwrap(python_function) # TODO(b/131153379): Consider Python3's fullargspec.kwonlyargs and # fullargspec.kwonlydefaults. if isinstance(unwrapped, functools.partial): # Also consider the Python3 case with kwonlydefaults. if fullargspec.defaults or fullargspec.kwonlydefaults: new_defaults = fullargspec.defaults new_args = fullargspec.args if fullargspec.defaults: # To be able to canonicalize the function properly, we want to ignore # default values that are overridden via a partial kwarg. For example: # # def func(a, b, c, d=5, e=7): # return a, b, c, d, e # p_func = functools.partial(tf.function(func, 10, e=9)) # # Here we want to drop from the defaults the parameter `e`. If we # forwarded the call to the partial function with a default for `e` # we would get an error for passing two values for one parameter. # # Note that this has a limitation: we can only override parameters at # the end of the parameter list. # # In this case we want to end up with 3 arguments (b, c, d) and 1 # default value (5). We do this by constructing a mask where 0 stands # for a value that was overridden by a partial kwarg. The seemingly # complicated logic below does just that - for arguments (b, c, d, e) # we would get a mask (1, 1, 1, 0). 
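# A worked illustration of the masking below, mirroring the example above
# (values are hypothetical): for arguments (b, c, d, e) with defaults
# (d=5, e=7) and a partial kwarg e=9,
#
#   non_keyword_defaults_mask == [1, 1, 1, 0]
#
# so itertools.compress drops `e`, leaving args (b, c, d) and the single
# surviving default 5 for `d`.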
old_args = fullargspec.args old_defaults = fullargspec.defaults no_default = object() num_args_without_defaults = len(old_args) - len(old_defaults) left_padding = tuple([no_default] * num_args_without_defaults) args_with_defaults = zip(old_args, left_padding + old_defaults) # Create a mask where 0 stands for args that had a partial kwarg # defined. non_keyword_defaults_mask = [ 0 if key in unwrapped.keywords else 1 for key in old_args ] # Keep only arguments and defaults that were not kwargs of partial. new_args_with_defaults = list( itertools.compress(args_with_defaults, non_keyword_defaults_mask)) # Keep all args. new_args = [arg for arg, _ in new_args_with_defaults] # Keep only real default values. new_defaults = [ default for _, default in new_args_with_defaults if default is not no_default ] fullargspec = tf_inspect.FullArgSpec( args=new_args, varargs=fullargspec.varargs, varkw=fullargspec.varkw, defaults=new_defaults, kwonlyargs=[], kwonlydefaults={}, annotations=fullargspec.annotations) is_method = tf_inspect.ismethod(python_function) return FunctionSpec(fullargspec, is_method, [], {}, input_signature) def __init__(self, fullargspec, is_method, args_to_prepend, kwargs_to_include, input_signature): self._fullargspec = fullargspec self._is_method = is_method del args_to_prepend del kwargs_to_include self._default_values = fullargspec.defaults if self._is_method: # Remove `self`: default arguments shouldn't be matched to it. # TODO(b/127938157): Should this error out if there is no arg to # be removed? args = fullargspec.args[1:] else: args = fullargspec.args # A cache mapping from argument name to index, for canonicalizing # arguments that are called in a keyword-like fashion. self._args_to_indices = {arg: i for i, arg in enumerate(args)} self.arg_names = args self.vararg_name = fullargspec.varargs # A cache mapping from arg index to default value, for canonicalization. offset = len(args) - len(self._default_values or []) self._arg_indices_to_default_values = { offset + index: default for index, default in enumerate(self._default_values or []) } if input_signature is None: self._input_signature = None else: if fullargspec.kwonlyargs: raise ValueError("Cannot define a TensorFlow function from a Python " "function with keyword arguments when " "input_signature is provided.") if not isinstance(input_signature, (tuple, list)): raise TypeError("input_signature must be either a tuple or a " "list, received " + str(type(input_signature))) self._input_signature = tuple(input_signature) self._flat_input_signature = tuple(nest.flatten(input_signature, expand_composites=True)) @property def fullargspec(self): return self._fullargspec @property def is_method(self): return self._is_method @property def args_to_prepend(self): return self._args_to_prepend @property def kwargs_to_include(self): return self._kwargs_to_include @property def input_signature(self): return self._input_signature @property def flat_input_signature(self): return self._flat_input_signature def canonicalize_function_inputs(self, *args, **kwargs): """Canonicalizes `args` and `kwargs`. Canonicalize the inputs to the Python function using a `FunctionSpec` instance. In particular, we parse the varargs and kwargs that the original function was called with into a tuple corresponding to the Python function's positional (named) arguments and a dictionary corresponding to its kwargs. Args: *args: The varargs this object was called with. **kwargs: The keyword args this function was called with.
Returns: A canonicalized ordering of the inputs represented by a tuple in the form (args, kwargs). Here: `args` is a full list of bound arguments, and `kwargs` contains only true keyword arguments, as opposed to named arguments called in a keyword-like fashion. Raises: ValueError: If a keyword in `kwargs` cannot be matched with a positional argument when an input signature is specified, or when the inputs do not conform to the input signature. """ if self._input_signature is not None: if len(args) > len(self._input_signature): raise TypeError( "When input_signature is provided, only pass arguments " "covered by it. Received %d argument(s)." % len(args)) for arg in six.iterkeys(kwargs): index = self._args_to_indices.get(arg, None) if index is None: raise TypeError( "Function got an unexpected keyword argument %s" % arg) if index >= len(self._input_signature): raise TypeError( "When input_signature is provided, only pass arguments " "covered by it. Received argument %s." % arg) if not kwargs: inputs = args default_keys = sorted(self._arg_indices_to_default_values.keys()) if default_keys: assert min(default_keys) <= len( args), "Not enough arguments (%s, %s, %s)" % (args, default_keys, self.arg_names) for index in default_keys: if index >= len(args): inputs += (self._arg_indices_to_default_values[index],) else: # Maps from index of arg to its corresponding value, according to `args` # and `kwargs`; seeded with the default values for the named args that # aren't in `args`. arg_indices_to_values = { index: default for index, default in six.iteritems( self._arg_indices_to_default_values) if index >= len(args) } consumed_args = [] for arg, value in six.iteritems(kwargs): index = self._args_to_indices.get(arg, None) if index is not None: arg_indices_to_values[index] = value consumed_args.append(arg) elif self._input_signature is not None: raise ValueError("Cannot define a TensorFlow function from a Python " "function with keyword arguments when " "input_signature is provided.") for arg in consumed_args: # After this loop, `kwargs` will only contain true keyword arguments, as # opposed to named arguments called in a keyword-like fashion. kwargs.pop(arg) inputs = args + _deterministic_dict_values(arg_indices_to_values) if self._input_signature is None: inputs = _convert_numpy_inputs(inputs) return inputs, kwargs else: assert not kwargs inputs = _convert_inputs_to_signature( inputs, self._input_signature, self._flat_input_signature) return inputs, {} def _convert_numpy_inputs(inputs): """Convert numpy array inputs to tensors.""" # We assume that any CompositeTensors have already converted their components # from numpy arrays to Tensors, so we don't need to expand composites here. flat_inputs = nest.flatten(inputs, expand_composites=False) # Check for NumPy arrays in arguments and convert them to Tensors. # TODO(nareshmodi): Skip ndarray conversion to tensor altogether, perhaps # finding a way to store them directly in the cache key (currently not # possible since ndarrays are not hashable).
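# Sketch of the observable behavior (values are illustrative): a call such as
#   f(np.array([1., 2.]))
# is canonicalized below into the equivalent of
#   f(constant_op.constant(np.array([1., 2.])))
# so traced functions only ever see Tensors. Note that the check below is a
# strict `type(value) == np.ndarray`, so ndarray subclasses are left alone.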
need_packing = False for index, value in enumerate(flat_inputs): if type(value) == np.ndarray: flat_inputs[index] = constant_op.constant(value) need_packing = True if need_packing: return nest.pack_sequence_as( structure=inputs, flat_sequence=flat_inputs, expand_composites=False) else: return inputs def _convert_inputs_to_signature(inputs, input_signature, flat_input_signature): """Convert inputs to pass into a function with an explicit signature.""" def format_error_message(inputs, input_signature): return (" inputs: (\n" + " " + ",\n ".join([str(i) for i in inputs]) + ")\n" + " input_signature: (\n" + " " + ",\n ".join([str(i) for i in input_signature]) + ")") try: # TODO(b/124370185): Use all elements as inputs to throw an error if there # are ignored arguments. Calling with arguments that are not part of the # signature should throw an error. flatten_inputs = nest.flatten_up_to( input_signature, inputs[:len(input_signature)], expand_composites=True) except ValueError: raise ValueError("Structure of Python function inputs does not match " "input_signature:\n%s" % format_error_message(inputs, input_signature)) need_packing = False for index, (value, spec) in enumerate(zip(flatten_inputs, flat_input_signature)): if not pywrap_tensorflow.IsTensor(value): try: flatten_inputs[index] = ops.convert_to_tensor( value, dtype_hint=spec.dtype) need_packing = True except ValueError: raise ValueError("When input_signature is provided, all inputs to " "the Python function must be convertible to " "tensors:\n%s" % format_error_message(inputs, input_signature)) if any(not spec.is_compatible_with(other) for spec, other in zip( flat_input_signature, flatten_inputs)): raise ValueError("Python inputs incompatible with input_signature:\n%s" % format_error_message(inputs, input_signature)) if need_packing: inputs = nest.pack_sequence_as( structure=input_signature, flat_sequence=flatten_inputs, expand_composites=True) return inputs class FunctionCache(object): """A lightweight container for cached functions. """ def __init__(self): # The set of functions that have been missed; entries are CacheKey with # input_signature `None` (e.g. a "call context key") self.missed = set() # The primary cache, mapping a fully shaped CacheKey to a function. self.primary = collections.OrderedDict() # A cache key lookup, mapping a CacheKey generated without shape info to a # flat list of relaxed shapes (one for each argument). Arguments that are # not Tensors contain a `None` for the corresponding relaxed shape. self.arg_relaxed_shapes = collections.OrderedDict() # The secondary cache, mapping a CacheKey generated without shape info to a # function. self.arg_relaxed = collections.OrderedDict() # All OrderedDicts require manual garbage collection. self._garbage_collectors = [ _FunctionGarbageCollector(self.primary), _FunctionGarbageCollector(self.arg_relaxed), _FunctionGarbageCollector(self.arg_relaxed_shapes)] def all_values(self): """A set of all `ConcreteFunction` instances held by this cache.""" return set(self.primary.values()) | set(self.arg_relaxed.values()) class Function(object): """Wrapper class for the graph functions defined for a Python function. See the documentation for `defun` for more information on the semantics of defined functions. `Function` class is thread-compatible meaning that minimal usage of defuns (defining and calling) is thread-safe, but if users call other methods or invoke the base `python_function` themselves, external synchronization is necessary. 
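For example (a minimal sketch; eager execution is assumed), plain define-and-call usage from several threads needs no extra locking:

  ```python
  import threading

  double = tf.contrib.eager.defun(lambda x: x * 2.)

  threads = [
      threading.Thread(target=double, args=(tf.constant(1.),))
      for _ in range(4)
  ]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  ```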
""" def __init__(self, python_function, name, input_signature=None, attributes=None, autograph=True, autograph_options=None, experimental_relax_shapes=False, capture_by_value=None): """Initializes a `Function`. Args: python_function: the function to be wrapped. name: the name given to it. input_signature: a possibly nested sequence of `TensorSpec` objects specifying the input signature of this function. If `None`, a separate function is instantiated for each inferred input signature. attributes: dict, extra keyword arguments that will be added as attribute of the function. autograph: whether to use autograph to compile `python_function`. See https://www.tensorflow.org/guide/autograph for more information. autograph_options: Experimental knobs to control behavior `when autograph=True`. See https://www.tensorflow.org/guide/autograph for more information. experimental_relax_shapes: When true, argument shapes may be relaxed to avoid unecessary retracing. capture_by_value: Experimental. Whether to capture resource variables by value or reference. If None, will inherit from a parent context or default to False. Raises: ValueError: if `input_signature` is not None and the `python_function`'s argspec has keyword arguments. """ self._python_function = python_function self._function_spec = FunctionSpec.from_function_and_signature( python_function, input_signature) self._name = name self._autograph = autograph self._autograph_options = autograph_options self._experimental_relax_shapes = experimental_relax_shapes self._function_cache = FunctionCache() self._function_attributes = attributes or {} self._capture_by_value = capture_by_value self._lock = threading.RLock() # _descriptor_cache is a of instance of a class to an instance-specific # `Function`, used to make sure defun-decorated methods create different # functions for each instance. self._descriptor_cache = weakref.WeakKeyDictionary() def __call__(self, *args, **kwargs): """Calls a graph function specialized to the inputs.""" graph_function, args, kwargs = self._maybe_define_function(args, kwargs) return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access @property def python_function(self): """Returns the wrapped Python function.""" return self._python_function # pylint: disable=protected-access @property def function_spec(self): return self._function_spec @property def input_signature(self): """Returns the input signature.""" return self._function_spec.input_signature @property def flat_input_signature(self): """Returns the flattened input signature.""" return self._function_spec.flat_input_signature def _get_concrete_function_internal_garbage_collected(self, *args, **kwargs): """Returns a concrete function which cleans up its graph function.""" if self.input_signature: args, kwargs = None, None graph_function, _, _ = self._maybe_define_function(args, kwargs) return graph_function def _get_concrete_function_internal(self, *args, **kwargs): """Bypasses error checking when getting a graph function.""" graph_function = self._get_concrete_function_internal_garbage_collected( *args, **kwargs) # We're returning this concrete function to someone, and they may keep a # reference to the FuncGraph without keeping a reference to the # ConcreteFunction object. So we won't clean up the reference cycles # manually and instead will leave them to Python's garbage collector. 
graph_function._garbage_collector.release() # pylint: disable=protected-access return graph_function def get_concrete_function(self, *args, **kwargs): """Returns a `ConcreteFunction` specialized to inputs and execution context. Args: *args: inputs to specialize on. **kwargs: inputs to specialize on. """ if self.input_signature: if kwargs: raise ValueError("Cannot define a TensorFlow function from a Python " "function with keyword arguments when " "input_signature is provided.") if args: # If args are provided, they must match the input signature. if not is_same_structure(self.input_signature, args): raise ValueError("Structure of Python function inputs does not match " "input_signature.") flat_inputs = nest.flatten(args, expand_composites=True) if any(not isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)) for arg in flat_inputs): raise ValueError("When input_signature is provided, all inputs to " "the Python function must be Tensors or " "tf.TensorSpec objects.") if any(not spec.is_compatible_with(other) for spec, other in zip(self.flat_input_signature, flat_inputs)): raise ValueError("Python inputs incompatible with input_signature: " "inputs (%s), input_signature (%s)" % (str(args), str(self.input_signature))) args, kwargs = None, None graph_function, args, kwargs = self._maybe_define_function(args, kwargs) if self.input_signature: args = self.input_signature kwargs = {} seen_names = set() captured = object_identity.ObjectIdentitySet( graph_function.graph.internal_captures) # pylint: disable=protected-access graph_function._arg_keywords = [] prefix_counts = {} # pylint: enable=protected-access num_positional = 0 for arg in graph_function.graph.inputs: if arg in captured: break num_positional += 1 user_arg_name = compat.as_str(arg.op.get_attr("_user_specified_name")) proposal = user_arg_name while proposal in seen_names: index = prefix_counts.get(user_arg_name, 1) proposal = "{}_{}".format(user_arg_name, index) prefix_counts[user_arg_name] = index + 1 seen_names.add(proposal) graph_function._arg_keywords.append(proposal) # pylint: disable=protected-access # Anything can be a positional argument, in the same order as .inputs graph_function._num_positional_args = num_positional # pylint: disable=protected-access return graph_function def __get__(self, instance, owner): """Makes it possible to defun instance methods.""" del owner # `instance` here is the instance that this `Function` was accessed through # e.g., for # # class Foo(object): # # @function.defun # def bar(self): # ... # # foo = Foo() # foo.bar() # `foo.bar` is a `Function` instance # # then `instance` will be `foo` (and `owner` will be `Foo`). We create a # new instance of `Function` here to allow different instances each # to create variables once, thereby allowing methods to be decorated with # defun. Keeps a cache to avoid retracing the function every time the # descriptor is accessed. if instance not in self._descriptor_cache: if instance is None: return self # If there is no instance-specific `Function` in the cache, we construct # an instance-specific `Function` that uses a weak reference to the # instance (so that the instance will be correctly gc'd). 
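# Sketch of the resulting behavior (continuing the `Foo` example above):
#
#   foo1, foo2 = Foo(), Foo()
#   assert foo1.bar is not foo2.bar  # distinct per-instance `Function`s
#   assert foo1.bar is foo1.bar      # cached on repeated access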
# And finally add the wrapped function to the description cache self._descriptor_cache[instance] = class_method_to_instance_method( self, instance) # Return the cached `Function` for the instance return self._descriptor_cache[instance] def _cache_key(self, args, kwargs, include_tensor_ranks_only=False): """Computes the cache key given inputs and execution context.""" if self.input_signature is None: inputs = (args, kwargs) if kwargs else args input_signature = pywrap_tensorflow.TFE_Py_EncodeArg( inputs, include_tensor_ranks_only) else: del args, kwargs assert not include_tensor_ranks_only input_signature = self.flat_input_signature ctx = context.context() # Don't need to open an init_scope if the _cache_key call is in eager mode # already. executing_eagerly = ctx.executing_eagerly() parent_graph = None if not executing_eagerly: with ops.init_scope(): # The graph, or whether we're executing eagerly, should be a part of the # cache key so we don't improperly capture tensors such as variables. executing_eagerly = ctx.executing_eagerly() parent_graph = None if executing_eagerly else ops.get_default_graph() # pylint: disable=protected-access default_graph = ops.get_default_graph() # TODO(b/117617952): The current distribution strategy will affect graph # building (e.g. accessing different variables from different devices) and # so requires retracing for each device. strategy_stack = default_graph._distribution_strategy_stack uses_distribution_strategy = ( strategy_stack and strategy_stack[-1].strategy.extended._retrace_functions_for_each_device ) if executing_eagerly: colocation_stack = () if uses_distribution_strategy: device_functions = (pydev.merge_device(ctx.device_name),) else: device_functions = () else: colocation_stack = tuple(default_graph._colocation_stack.peek_objs()) if (uses_distribution_strategy or func_graph_module.device_stack_has_callable( default_graph._device_function_stack)): # Putting the device in the cache key ensures that call-site device # annotations are respected. device_functions = tuple(default_graph._device_functions_outer_to_inner) else: device_functions = () in_cross_replica_context = False try: in_cross_replica_context = (strategy_stack[-1].replica_context is None) # pylint: disable=protected-access except (AttributeError, IndexError): pass return CacheKey(input_signature, parent_graph, device_functions, colocation_stack, in_cross_replica_context) def _create_graph_function(self, args, kwargs, override_flat_arg_shapes=None): """Create a `ConcreteFunction` from `args` and `kwargs`.""" if self.input_signature is None: arglen = len(args) else: arglen = len(self.input_signature) base_arg_names = self._function_spec.arg_names[:arglen] num_missing_args = arglen - len(self._function_spec.arg_names) missing_arg_names = [self._function_spec.vararg_name] * num_missing_args # Produce a list of missing args of the form ["arg_0", "arg_1", ...], # where arg is based on the self._function_spec.vararg_name. missing_arg_names = [ "%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names) ] arg_names = base_arg_names + missing_arg_names graph_function = ConcreteFunction( func_graph_module.func_graph_from_py_func( self._name, self._python_function, args, kwargs, self.input_signature, autograph=self._autograph, autograph_options=self._autograph_options, arg_names=arg_names, override_flat_arg_shapes=override_flat_arg_shapes, capture_by_value=self._capture_by_value), self._function_attributes, # Tell the ConcreteFunction to clean up its graph once it goes out of # scope. 
This is not the default behavior since it gets used in some # places (like Keras) where the FuncGraph lives longer than the # ConcreteFunction. shared_func_graph=False) return graph_function def _define_function_with_shape_relaxation(self, args, kwargs): """Define a function, relaxing arg shapes to avoid unnecessary retracing.""" rank_only_cache_key = self._cache_key( args, kwargs, include_tensor_ranks_only=True) arg_shapes = _flat_shape_list(args, kwargs) relaxed_arg_shapes = self._function_cache.arg_relaxed_shapes.get( rank_only_cache_key, None) relaxed_arg_function = self._function_cache.arg_relaxed.get( rank_only_cache_key, None) if (relaxed_arg_function is not None and _compatible_shapes(flat_relaxed=relaxed_arg_shapes, flat_to_check=arg_shapes)): return relaxed_arg_function, args, kwargs if relaxed_arg_shapes is None: relaxed_arg_shapes = arg_shapes else: if len(arg_shapes) != len(relaxed_arg_shapes): raise RuntimeError("Expected arg_shapes len to match " "relaxed_arg_shapes len: %d vs. %d" % (len(arg_shapes), len(relaxed_arg_shapes))) relaxed_arg_shapes = [ common_shape(x, y) for (x, y) in zip( arg_shapes, relaxed_arg_shapes)] self._function_cache.arg_relaxed_shapes[rank_only_cache_key] = ( relaxed_arg_shapes) graph_function = self._create_graph_function( args, kwargs, override_flat_arg_shapes=relaxed_arg_shapes) self._function_cache.arg_relaxed[rank_only_cache_key] = graph_function return graph_function, args, kwargs def _maybe_define_function(self, args, kwargs): """Gets a function for these inputs, defining it if necessary. `args` and `kwargs` can be None if this `Function` was created with an `input_signature`. Args: args: The varargs for the Python function. kwargs: The keyword args for the Python function. Returns: A graph function corresponding to the input signature implied by args and kwargs, as well as the inputs that the object should be called with. Raises: ValueError: If inputs are incompatible with the input signature. TypeError: If the function inputs include non-hashable objects. RuntimeError: If there's an internal bug (inconsistency) in handling shape relaxation retracing. """ if self.input_signature is None or args is not None or kwargs is not None: args, kwargs = self._function_spec.canonicalize_function_inputs( *args, **kwargs) cache_key = self._cache_key(args, kwargs) try: hash(cache_key) except TypeError as e: raise TypeError( "Arguments supplied to `defun`-generated functions must be" " hashable. Original error: %s" % e) with self._lock: graph_function = self._function_cache.primary.get(cache_key, None) if graph_function is not None: return graph_function, args, kwargs logging.vlog(1, "Creating new FuncGraph for Python function %r (key: %r)", self._python_function, cache_key) logging.vlog(2, "Python function signature [args: %s] [kwargs: %s]", args, kwargs) call_context_key = cache_key.replace(input_signature=None) ag_status = ( ag_ctx.Status.ENABLED if self._autograph else ag_ctx.Status.DISABLED) with ag_ctx.ControlStatusCtx( status=ag_status, options=self._autograph_options): # Build a function with shape relaxation retracing if: # 1. shape relaxation is explicitly enabled # and 2. there's no provided input signature # and 3.
there's been a cache miss for this calling context if (self._experimental_relax_shapes and self.input_signature is None and call_context_key in self._function_cache.missed): return self._define_function_with_shape_relaxation(args, kwargs) self._function_cache.missed.add(call_context_key) graph_function = self._function_cache.primary.get(cache_key, None) if graph_function is None: graph_function = self._create_graph_function(args, kwargs) self._function_cache.primary[cache_key] = graph_function return graph_function, args, kwargs def register(func, *args, **kwargs): """Registers a specialization of a `Function` into the graph. This does not actually call the function with the inputs; it only puts the function definition into the graph. Registering the function with different input parameters will result in multiple versions of the function being registered in the graph (see the usage sketch at the end of this module). Args: func: the `Function` instance generated by a `@defun` decorator. *args: input arguments for the Python function. **kwargs: input keyword arguments for the Python function. Returns: A `ConcreteFunction` object specialized to inputs and execution context. Raises: ValueError: When the input function is not a `defun`-wrapped Python function. """ if not isinstance(func, Function): raise ValueError("Only defun-wrapped functions are allowed to be " "registered. Got type: %s" % type(func)) concrete_func = func.get_concrete_function(*args, **kwargs) concrete_func.add_to_graph() concrete_func.add_gradient_functions_to_graph() return concrete_func def validate_signature(signature): if any(not isinstance(arg, tensor_spec.TensorSpec) for arg in nest.flatten(signature, expand_composites=True)): raise TypeError("Invalid input_signature {}; input_signature must be " "a possibly nested sequence of TensorSpec objects." .format(signature)) def defun(func=None, input_signature=None, autograph=True, experimental_autograph_options=None, experimental_relax_shapes=False): """Compiles a Python function into a callable TensorFlow graph. `defun` (short for "define function") compiles a Python function composed of TensorFlow operations into a callable that executes a `tf.Graph` containing those operations. The callable produced by `defun` contains only the subgraph of TensorFlow operations that were executed when the Python function was called with a particular input signature, defined as a list of the shapes and dtypes of the Python function's Tensor-valued arguments and the values of its non-Tensor Python objects. When eager execution is enabled, the ability to create graphs from Python functions makes it possible to incrementally trade off debuggability and interactivity for performance. Functions compiled with `defun` cannot be inspected with `pdb`; however, executing a graph generated by `defun` sometimes takes less time and memory than eagerly executing the corresponding Python function, since specifying computations as graphs allows for optimizations like automatic buffer reuse and parallelization among ops. Note that executing a `defun`-compiled function incurs a small constant overhead, so eagerly executing sufficiently small Python functions might take less time than executing their corresponding `defun`-generated graphs. For a Python function to be compatible with `defun`, all of its arguments must be hashable Python objects or lists thereof. The function itself may not modify the list/map structure of its arguments. Additionally, it must return zero or more `tf.Tensor` objects.
If the Python function returns a `tf.Variable`, its compiled version will return the value of that variable as a `tf.Tensor`. Executing a graph generated by `defun` respects device annotations (i.e., all `with tf.device` directives present in a Python function will also be present in its corresponding graph), but it is not yet possible to execute the generated graphs across multiple machines. _Example Usage_ ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() # A simple example. def f(x, y): return tf.reduce_mean(tf.multiply(x ** 2, 3) + y) g = tf.contrib.eager.defun(f) x = tf.constant([[2.0, 3.0]]) y = tf.constant([[3.0, -2.0]]) # `f` and `g` will return the same value, but `g` will be executed as a # TensorFlow graph. assert f(x, y).numpy() == g(x, y).numpy() # `defun` is capable of compiling Python functions that close over Python # objects, including Tensors and Variables. @tf.contrib.eager.defun def h(): return f(x, y) assert (h().numpy() == f(x, y).numpy()).all() # `defun` automatically lifts variables out of the graphs it creates, # allowing you to compile the `call` methods of `tf.keras.layers.Layer` and # `tf.keras.Model` objects. class MyModel(tf.keras.Model): def __init__(self, keep_probability=0.2): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.keep_probability = keep_probability @tf.contrib.eager.defun def call(self, inputs, training=True): x = self.dense2(self.dense1(inputs)) if training: return tf.nn.dropout(x, self.keep_probability) else: return x model = MyModel() model(x, training=True) # executes a graph, with dropout model(x, training=False) # executes a graph, without dropout # `defun`-compiled functions are differentiable. optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01) with tf.GradientTape() as tape: outputs = model(x) gradient = tape.gradient(outputs, model.trainable_variables) optimizer.apply_gradients((grad, var) for grad, var in zip(gradient, model.trainable_variables)) ``` When using `defun`, there are subtleties regarding inputs, Python control flow, and variable creation that one should be aware of. For concreteness, let `f` be a Python function that returns zero or more `tf.Tensor` objects and let `F = defun(f)`. `F` builds a graph for each unique input signature it sees, Python control flow is baked into graphs, and operations related to variable initialization are automatically lifted out of the graphs that `F` generates and placed in the eager context if executing eagerly or into an outer graph otherwise. _Input Signatures_ By default, `F = tf.contrib.eager.defun(f)` instantiates a separate graph for every unique sequence of the shapes and dtypes of Tensor arguments and the values of Python objects it is invoked with. For example, calling `F(tf.random.uniform([2]))` will execute a different graph than `F(tf.random.uniform([3]))` because the two inputs have different shapes. The first time that `F(*args, **kwargs)` is called with a particular sequence of Tensor shapes and dtypes and Python values, it constructs a graph by tracing the execution of `f(*args, **kwargs)`; this graph is bound to an input signature inferred from `(*args, **kwargs)` and cached for future reuse.  NumPy arrays passed as inputs to `F` are converted to `tf.Tensor` objects before being passed to `f`, and are treated as Tensors for caching.
This allows a function to be called multiple times with NumPy arrays having different values but the same shape and dtype without re-tracing each time. `tf.contrib.eager.defun` caches graphs for your convenience, letting you define TensorFlow functions without explicitly specifying their signatures. However, this policy is conservative and potentially expensive; for example, when different invocations of your function have differently-shaped Tensor inputs, this policy might generate more graph functions than necessary. To eliminate such costs, `tf.contrib.eager.defun` allows you to supply an optional `input_signature` argument specifying the shapes and dtypes of the inputs. In particular, the shapes may be partially unspecified, with `None`s in the unknown dimensions. When an input signature is provided, `tf.contrib.eager.defun` will only instantiate a single graph for the decorated Python function. The following is an example: ```python import tensorflow as tf # The first `TensorSpec` below describes the shape and dtype of `words`, # and the second describes the shape and dtype of `another_tensor`. Note that # the last dimension of the `words` `TensorSpec` is left unspecified. @tf.contrib.eager.defun(input_signature=[ tf.contrib.eager.TensorSpec(shape=[50, 300, None], dtype=tf.float32), tf.contrib.eager.TensorSpec(shape=[300, 100], dtype=tf.float32) ]) def my_sequence_model(words, another_tensor): ... # Note how the third dimension of the first input can vary freely. words = tf.random.uniform([50, 300, 10]) second_input = tf.random.uniform([300, 100]) my_sequence_model(words, second_input) words = tf.random.uniform([50, 300, 20]) my_sequence_model(words, second_input) # Passing an input with an incompatible shape will raise an error. words = tf.random.uniform([50, 100, 20]) my_sequence_model(words, second_input) # <---- This will raise an error. ``` Python functions that are compiled with an `input_signature` must only accept Tensors as arguments and must not take unnamed keyword arguments (**kwargs). _Tracing_ Be aware that because `F` only logs TensorFlow operations, all the other Python code that `f` executes will only shape the _construction_ of the graphs that `F` executes: the Python code won't be executed when the graphs themselves are executed, though it will be executed every time the Python function is traced (and a given Python function might be traced multiple times, once for each input signature it is invoked with). For example, whereas the Python function ```python import tensorflow as tf import numpy as np tf.compat.v1.enable_eager_execution() def add_noise(): return tf.eye(5) + np.random.randn(5, 5) ``` will return a different output every time it is invoked, the compiled function `compiled = tf.contrib.eager.defun(add_noise)` will return the same value every time it is called, since a particular random offset generated by NumPy will be inserted into the graph as a TensorFlow constant. The solution is to replace the call to `np.random.randn` with `tf.random.normal((5, 5))`. _Python Side-Effects_ A corollary of the previous discussion on tracing is the following: If a Python function `f` has Python side-effects, then executing `f` multiple times will not necessarily be semantically equivalent to executing `F = tf.contrib.eager.defun(f)` multiple times; this difference is due to the fact that `defun` only captures the subgraph of TensorFlow operations that is constructed when `f` is called in a graph-building context.
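For example, a Python-level counter incremented inside `f` is only bumped when `f` is traced, not on every invocation of `F`. The following minimal sketch (written in the style of the examples above) illustrates this:

```python
import tensorflow as tf
tf.compat.v1.enable_eager_execution()

trace_count = [0]

def f(x):
  trace_count[0] += 1  # Python side-effect; runs only while tracing.
  return x * x

F = tf.contrib.eager.defun(f)
F(tf.constant(2.0))
F(tf.constant(3.0))  # Same shapes and dtypes, so the cached graph is reused.
assert trace_count[0] == 1
```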
_Python Control Flow_ The structure of many machine learning computations depends upon whether one is training or validating, and it is common to nest specialized logic under `if training:` blocks. By mapping each input signature to a unique graph, `defun` lets users transparently compile such code, as the following code snippet demonstrates: ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() @tf.contrib.eager.defun def lossy_matmul(W, x, training=True): outputs = tf.matmul(W, x) if training: outputs = tf.nn.dropout(outputs, keep_prob=0.2) return outputs W = tf.random.normal((3, 5)) x = tf.random.normal((5, 1)) # Executes a graph that applies dropout. lossy_outputs = lossy_matmul(W, x, training=True) # Executes a graph that does not apply dropout. exact_outputs = lossy_matmul(W, x, training=False) ``` _TensorFlow Control Flow_ When `autograph` is `True`, data-dependent control flow is allowed as well. Control flow statements that depend on `Tensor` values are staged into corresponding TensorFlow ops. For example, the following code will work as expected: ```python @tf.contrib.eager.defun def dynamic_rnn_loop(cell, seq): state, output = cell.zero_state() for input in seq: state, output = cell(input, state) return output ``` For more information see `tf.autograph`. _Variables_ TensorFlow operations related to variable creation and initialization are automatically lifted out of the graphs generated by `defun`. In practice, this implies that variable creation and initialization only happen the first time `F` is called, and that variables are reused every time thereafter. Many TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the first time they are called and reuse them thereafter. Automatic variable lifting makes it possible to compile these APIs without extra effort, at the cost of introducing a discrepancy between the semantics of executing Python functions and their corresponding compiled functions. For example: ```python import tensorflow as tf tf.compat.v1.enable_eager_execution() def fn(): x = tf.Variable(0.0) x.assign_add(1.0) return x.read_value() # `fn` is a Python function, so x is created, initialized, and destroyed upon # every invocation. assert fn().numpy() == fn().numpy() == 1.0 compiled = tf.contrib.eager.defun(fn) # Compiling `fn` with `defun` hoists all variables outside of the generated # graph, so initialization happens exactly once. assert compiled().numpy() == 1.0 assert compiled().numpy() == 2.0 ``` Finally, because each input signature is bound to a unique graph, if your Python function constructs `tf.Variable` objects, then each graph constructed for that Python function will reference a unique set of variables. To circumvent this problem, we recommend against compiling Python functions that create `tf.Variable` objects. Instead, Python functions should either lexically close over `tf.Variable` objects or accept them as arguments, preferably encapsulated in an object-oriented container. If you must create variables inside your Python function and you want each graph generated for it to reference the same set of variables, add logic to your Python function that ensures that variables are only created the first time it is called and are reused for every subsequent invocation; note that this is precisely what `tf.keras.layers.Layer` objects do, so we recommend using them to represent variable-bearing computations whenever possible. Args: func: function to be compiled.
If `func` is None, returns a decorator that can be invoked with a single argument - `func`. The end result is equivalent to providing all the arguments up front. In other words, defun(input_signature=...)(func) is equivalent to defun(func, input_signature=...). The former allows the following use case: @tf.contrib.eager.defun(input_signature=...) def foo(...): ... input_signature: A possibly nested sequence of `tf.contrib.eager.TensorSpec` objects specifying the shapes and dtypes of the Tensors that will be supplied to this function. If `None`, a separate function is instantiated for each inferred input signature. If a signature is specified, every input to `func` must be a `Tensor`, and `func` cannot accept `**kwargs`. autograph: Whether `func` should be compiled before constructing the graph. See https://www.tensorflow.org/guide/autograph for more information. experimental_autograph_options: Experimental knobs (in the form of a tuple of tensorflow.autograph.Feature values) to control behavior when autograph=True. experimental_relax_shapes: When true, argument shapes may be relaxed to avoid unnecessary retracing. Returns: If `func` is not None, returns a callable that will execute the compiled function (and return zero or more `tf.Tensor` objects). If `func` is None, returns a decorator that, when invoked with a single `func` argument, returns a callable equivalent to the case above. Raises: TypeError: If `input_signature` is neither `None` nor a sequence of `tf.contrib.eager.TensorSpec` objects. """ return defun_with_attributes( func=func, input_signature=input_signature, autograph=autograph, experimental_autograph_options=experimental_autograph_options, experimental_relax_shapes=experimental_relax_shapes) def defun_with_attributes(func=None, input_signature=None, attributes=None, autograph=True, experimental_autograph_options=None, experimental_relax_shapes=False): """Compiles a Python function into a callable TensorFlow graph. This function supports adding extra function attributes. See detailed documentation in defun(). Currently this is not exposed in the public API since we don't expect users to use attributes directly, and attributes won't work by themselves. This assumption might change in the future. Args: func: function to be compiled. input_signature: same as defun()'s input_signature. attributes: A dictionary of arguments which will be added to the function def as attributes. Currently only primitive types are supported as values, and only whitelisted attribute names are allowed. An unwhitelisted attribute name or unsupported value will result in a ValueError. `func_name` is also one of the whitelisted arguments; it is a Python string that sets the name for this `ConcreteFunction` in the graph. autograph: same as defun()'s autograph. experimental_autograph_options: same as defun()'s experimental_autograph_options. experimental_relax_shapes: same as defun()'s experimental_relax_shapes. Returns: Same as the return value of defun, with attributes added to the function in the graph. """ if input_signature is not None: validate_signature(input_signature) # TODO(apassos): deal with captured global state. Deal with control flow.
def decorated(function): try: if attributes: name = attributes.pop("func_name", function.__name__) else: name = function.__name__ except AttributeError: name = "function" return tf_decorator.make_decorator( function, Function( function, name, input_signature=input_signature, attributes=attributes, autograph=autograph, autograph_options=experimental_autograph_options, experimental_relax_shapes=experimental_relax_shapes)) # This code path is for the `foo = tfe.defun(foo, ...)` use case if func is not None: return decorated(func) # This code path is for the # # @tfe.defun(...) # def foo(...): # ... # # use case, which is equivalent to `foo = tfe.defun(...)(foo)` return decorated # When a method is bound to objects of this type, it allows AutoGraph to # recover a weak reference to the original method's self pointer, so that it # can execute it consistently with class_method_to_instance_method's # bound_method_wrapper. # TODO(b/119246461): This is not pretty. Use a descriptor instead? class TfMethodTarget(object): """Binding target for methods replaced by function and defun.""" def __init__(self, target, original_python_function): self.weakrefself_target__ = target self.weakrefself_func__ = weakref.ref(original_python_function) @property def target(self): return self.weakrefself_target__() def call(self, args, kwargs): wrapped_fn = self.weakrefself_func__() if tf_inspect.ismethod(wrapped_fn): wrapped_fn = six.get_unbound_function(wrapped_fn) return wrapped_fn(self.weakrefself_target__(), *args, **kwargs) def class_method_to_instance_method(original_function, instance): """Constructs a new `Function` with `self` bound.""" weak_instance = weakref.ref(instance) # Note: while we could bind to a weakref proxy instead, that causes the # bound method to be unhashable. bound_method = types_lib.MethodType( original_function.python_function, TfMethodTarget(weak_instance, original_function.python_function)) # original_function is expected to be of one of the two `Function` types # (defined either in function.py or def_function.py). assert hasattr(original_function, "_name") assert hasattr(original_function, "_autograph") assert hasattr(original_function, "_function_spec") assert hasattr(original_function, "python_function") weak_bound_method_wrapper = None def bound_method_wrapper(*args, **kwargs): """Wraps either a dummy MethodType or a converted AutoGraph function.""" # __wrapped__ allows AutoGraph to swap in a converted function. strong_bound_method_wrapper = weak_bound_method_wrapper() wrapped_fn = strong_bound_method_wrapper.__wrapped__ if wrapped_fn is strong_bound_method_wrapper.__original_wrapped__: # If __wrapped__ was not replaced, then call original_function. # TODO(mdan): For better consistency, use the wrapper's call(). wrapped_fn = original_function.python_function if tf_inspect.ismethod(wrapped_fn): wrapped_fn = six.get_unbound_function(wrapped_fn) return wrapped_fn(weak_instance(), *args, **kwargs) # If __wrapped__ was replaced, then it is always an unbound function. # However, the replacer is still responsible for attaching self properly. # TODO(mdan): Is it possible to do it here instead? return wrapped_fn(*args, **kwargs) weak_bound_method_wrapper = weakref.ref(bound_method_wrapper) # pylint: disable=protected-access # We make a dummy MethodType object to generate the correct bound method # signature. The actual call is to a function with a weak reference to # `instance`.
instance_func = type(original_function)( tf_decorator.make_decorator(bound_method, bound_method_wrapper), name=original_function._name, autograph=original_function._autograph, input_signature=original_function.input_signature) # pylint: enable=protected-access # And we wrap the function with tf_decorator so inspection works correctly wrapped_instance_func = tf_decorator.make_decorator( original_function.python_function, instance_func) return wrapped_instance_func class _FunctionGarbageCollector(object): """Cleans up cycles when a defun goes out of scope.""" def __init__(self, cache): self._cache = cache def __del__(self): if func_graph_module is None or memory is None: return try: while self._cache: self._cache.popitem() memory.dismantle_ordered_dict(self._cache) except: # pylint: disable=bare-except pass class ConcreteFunctionGarbageCollector(object): """Cleans up reference cycles when a `ConcreteFunction` goes out of scope.""" def __init__(self, func_graph): self._func_graph = func_graph def release(self): """Call off the FuncGraph deletion.""" self._func_graph = None def __del__(self): if func_graph_module is None or memory is None or self._func_graph is None: return try: func_graph_module.dismantle_func_graph(self._func_graph) except: # pylint: disable=bare-except pass
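# Usage sketch for `register` above (illustrative only; assumes a
# graph-building context and that `ops` and `constant_op` are importable):
#
#   @defun
#   def double(x):
#     return x * 2.0
#
#   with ops.Graph().as_default():
#     x = constant_op.constant([1.0])
#     # Adds the function definition (and its gradient functions) to the
#     # default graph without executing it.
#     concrete = register(double, x)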
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/function.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Executor for eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow class Executor(object): """A class for handling eager execution. The default behavior for asynchronous execution is to serialize all ops on a single thread. Having different `Executor` objects in different threads enables executing ops asynchronously in parallel: ```python def thread_function(): ex = executor.new_executor(enable_async=True) context.set_executor(ex) a = threading.Thread(target=thread_function) a.start() b = threading.Thread(target=thread_function) b.start() ``` """ def __init__(self, handle): self._handle = handle def __del__(self): try: # pywrap_tensorflow.TFE_ExecutorWaitForAllPendingNodes(self._handle) pywrap_tensorflow.TFE_DeleteExecutor(self._handle) except TypeError: # Suppress some exceptions, mainly for the case when we're running on # module deletion. Things that can go wrong include the pywrap module # already being unloaded, self._handle no longer being # valid, and so on. Printing warnings in these cases is silly # (exceptions raised from __del__ are printed as warnings to stderr). pass # 'NoneType' object is not callable when the handle has been # partially unloaded. def is_async(self): return pywrap_tensorflow.TFE_ExecutorIsAsync(self._handle) def handle(self): return self._handle def wait(self): """Waits for ops dispatched in this executor to finish.""" pywrap_tensorflow.TFE_ExecutorWaitForAllPendingNodes(self._handle) def clear_error(self): """Clears errors raised in this executor during execution.""" pywrap_tensorflow.TFE_ExecutorClearError(self._handle) def new_executor(enable_async): handle = pywrap_tensorflow.TFE_NewExecutor(enable_async) return Executor(handle)
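# Usage sketch (an illustration based only on the APIs defined above):
#
#   ex = new_executor(enable_async=True)
#   assert ex.is_async()
#   # ... dispatch ops on the thread that installed this executor ...
#   ex.wait()         # block until all pending nodes have finished
#   ex.clear_error()  # drop errors raised by previously dispatched ops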
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/executor.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utils for memory tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import six from tensorflow.python.eager import context # memory_profiler might not be available in the OSS version of TensorFlow. try: import memory_profiler # pylint:disable=g-import-not-at-top except ImportError: memory_profiler = None def assert_no_leak(f, num_iters=100000, increase_threshold_absolute_mb=10): """Assert memory usage doesn't increase beyond the given threshold for f.""" with context.eager_mode(): # Warm up. f() # Wait for background threads to start up and take over memory. # FIXME: The nature of this test leaves few other options. Maybe there # is a better way to do this. time.sleep(4) initial = memory_profiler.memory_usage(-1)[0] for _ in six.moves.range(num_iters): f() increase = memory_profiler.memory_usage(-1)[0] - initial assert increase < increase_threshold_absolute_mb, ( "Increase is too high. Initial memory usage: %f MB. Increase: %f MB. " "Maximum allowed increase: %f MB.") % (initial, increase, increase_threshold_absolute_mb) def memory_profiler_is_available(): return memory_profiler is not None
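# Usage sketch (illustrative only; `make_and_drop_tensor` is a hypothetical
# test body, and `constant_op` is assumed to be imported):
#
#   def make_and_drop_tensor():
#     constant_op.constant([1.0, 2.0])
#
#   if memory_profiler_is_available():
#     assert_no_leak(make_and_drop_tensor, num_iters=1000,
#                    increase_threshold_absolute_mb=30)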
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/memory_tests/memory_test_util.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for memory leaks in remote eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.eager import def_function from tensorflow.python.eager import remote from tensorflow.python.eager import test from tensorflow.python.eager.memory_tests import memory_test_util from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.training import server_lib class RemoteWorkerMemoryTest(test.TestCase): def __init__(self, method): super(RemoteWorkerMemoryTest, self).__init__(method) # used for remote worker tests os.environ["TF_EAGER_REMOTE_USE_SEND_TENSOR_RPC"] = "1" self._cached_server = server_lib.Server.create_local_server() self._cached_server_target = self._cached_server.target[len("grpc://"):] def testMemoryLeakInLocalCopy(self): if not memory_test_util.memory_profiler_is_available(): self.skipTest("memory_profiler required to run this test") remote.connect_to_remote_host(self._cached_server_target) # Run a function locally with the input on a remote worker and ensure we # do not leak a reference to the remote tensor. @def_function.function def local_func(i): return i def func(): with ops.device("job:worker/replica:0/task:0/device:CPU:0"): x = array_ops.zeros([1000, 1000], dtypes.int32) local_func(x) memory_test_util.assert_no_leak( func, num_iters=100, increase_threshold_absolute_mb=50) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/memory_tests/remote_memory_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for memory leaks in eager execution. It is possible that this test suite will eventually become flaky due to taking too long to run (since the tests iterate many times), but for now they are helpful for finding memory leaks since not all PyObject leaks are found by introspection (test_util decorators). Please be careful adding new tests here. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import keras from tensorflow.python.eager import backprop from tensorflow.python.eager import def_function from tensorflow.python.eager import test from tensorflow.python.eager.memory_tests import memory_test_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops.variables import Variable class SingleLayerNet(keras.Model): """Simple keras model used to ensure that there are no leaks.""" def __init__(self): super(SingleLayerNet, self).__init__() self.fc1 = keras.layers.Dense(5) def call(self, x): return self.fc1(x) class MemoryTest(test.TestCase): def testMemoryLeakAnonymousVariable(self): if not memory_test_util.memory_profiler_is_available(): self.skipTest("memory_profiler required to run this test") def f(): inputs = Variable(array_ops.zeros([32, 100], dtypes.float32)) del inputs memory_test_util.assert_no_leak(f, num_iters=10000) def testMemoryLeakInSimpleModelForwardOnly(self): if not memory_test_util.memory_profiler_is_available(): self.skipTest("memory_profiler required to run this test") inputs = array_ops.zeros([32, 100], dtypes.float32) net = SingleLayerNet() def f(): with backprop.GradientTape(): net(inputs) memory_test_util.assert_no_leak(f) def testMemoryLeakInSimpleModelForwardAndBackward(self): if not memory_test_util.memory_profiler_is_available(): self.skipTest("memory_profiler required to run this test") inputs = array_ops.zeros([32, 100], dtypes.float32) net = SingleLayerNet() def f(): with backprop.GradientTape() as tape: result = net(inputs) tape.gradient(result, net.variables) del tape memory_test_util.assert_no_leak(f) def testMemoryLeakInFunction(self): if not memory_test_util.memory_profiler_is_available(): self.skipTest("memory_profiler required to run this test") def f(): @def_function.function def graph(x): return x * x + x graph(constant_op.constant(42)) memory_test_util.assert_no_leak( f, num_iters=1000, increase_threshold_absolute_mb=30)
tensorflow-r1.15.5-nv23.03
tensorflow/python/eager/memory_tests/memory_test.py
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/__init__.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for python.compiler.xla.xla.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python import summary from tensorflow.python.compiler.xla import xla from tensorflow.python.eager import def_function from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_util from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test from tensorflow.python.tpu import tpu_feed _TRAIN = model_fn_lib.ModeKeys.TRAIN _EVAL = model_fn_lib.ModeKeys.EVAL _EXPECTED_LOSS = 1 _EXPECTED_FEATURE = 2 _EXPECTED_LABEL = 3 class XLACompileContextTest(test.TestCase, parameterized.TestCase): def create_test_xla_compile_context(self): computation_name = ops.get_default_graph().unique_name('computation') pivot = control_flow_ops.no_op(name=computation_name + '/pivot') return xla.XLACompileContext(name=computation_name, pivot=pivot) @test_util.run_v1_only('Testing graph mode behavior only') def test_report_unsupported_operations_graph_mode(self): """Tests that unsupported operations are detected.""" context = self.create_test_xla_compile_context() context.Enter() dummy_tensor = constant_op.constant(1.1) audio_summary = summary.audio('audio_summary', dummy_tensor, 0.5) histogram_summary = summary.histogram('histogram_summary', dummy_tensor) image_summary = summary.image('image_summary', dummy_tensor) scalar_summary = summary.scalar('scalar_summary', dummy_tensor) tensor_summary = summary.tensor_summary('tensor_summary', dummy_tensor) summary.merge( [ audio_summary, histogram_summary, image_summary, scalar_summary, tensor_summary ], name='merge_summary') logging_ops.Print(dummy_tensor, [dummy_tensor], name='print_op') context.Exit() unsupported_ops_names = [op.name for op in context._unsupported_ops] self.assertEqual(unsupported_ops_names, [ u'audio_summary', u'histogram_summary', u'image_summary', u'scalar_summary', u'tensor_summary', u'merge_summary/merge_summary', u'print_op' ]) @test_util.run_v1_only('Testing graph mode behavior only') def test_resource_variable_graph_mode(self): """Tests that resource variable usage is allowed.""" a = variable_scope.get_variable( name='variable_a', use_resource=True, initializer=1) context = self.create_test_xla_compile_context() context.Enter() a.assign(2) context.Exit() def test_resource_variable_in_function(self): """Tests that resource variable usage is allowed.""" a = 
variable_scope.get_variable( name='variable_a', use_resource=True, initializer=1) @def_function.function def func(): context = self.create_test_xla_compile_context() context.Enter() o = a.assign(2) context.Exit() return o self.assertEqual(self.evaluate(func()), 2) @test_util.run_v1_only('Testing v1-only ref variable handling.') def test_non_resource_variable_error(self): """Tests that non-resource variable usage is disallowed.""" a = variable_scope.get_variable( name='variable_a', shape=(1), use_resource=False) context = self.create_test_xla_compile_context() context.Enter() with self.assertRaisesRegexp( NotImplementedError, 'Non-resource Variables are not supported inside ' r'XLA computations \(operator name: Assign\)'): state_ops.assign(a, a + 1) context.Exit() @test_util.build_as_function_and_v1_graph def test_nested_xla_compile_error(self): """Tests that nested XLA computation leads to fatal error.""" context1 = self.create_test_xla_compile_context() context1.Enter() context2 = self.create_test_xla_compile_context() context2.Enter() with self.assertRaisesRegexp(ValueError, 'XLA compiled computations cannot be nested'): constant_op.constant(1) context2.Exit() context1.Exit() @test_util.build_as_function_and_v1_graph def test_xla_compile_attr(self): """Tests that ops are tagged with XLA compile ID attribute.""" context = self.create_test_xla_compile_context() context.Enter() op = constant_op.constant(1) context.Exit() self.assertIn('_xla_compile_id', op.op.node_def.attr) @test_util.build_as_function_and_v1_graph def test_op_without_input(self): """Tests that ops without inputs depend on pivot correctly.""" context = self.create_test_xla_compile_context() context.Enter() op = constant_op.constant(1) context.Exit() self.assertIn(context._pivot, op.op.control_inputs) @test_util.run_v1_only('Testing graph mode behavior only') def test_external_control_edges_graph_mode(self): """Tests that external control edges are handled correctly.""" i = constant_op.constant(1) op1 = constant_op.constant(1) with ops.control_dependencies([op1]): op2 = constant_op.constant(1) self.assertIn(op1.op, op2.op.control_inputs) def while_body(i): del i # unused context = self.create_test_xla_compile_context() context.Enter() with ops.control_dependencies([op1]): op3 = constant_op.constant(1) context.Exit() self.assertNotIn(op1.op, op3.op.control_inputs) return op3 control_flow_ops.while_loop( cond=lambda i: math_ops.less(i, 10), body=while_body, loop_vars=[i]) @test_util.build_as_function_and_v1_graph def test_op_output_marked_as_seen(self): """Tests that any op output is marked as seen in context.""" context = self.create_test_xla_compile_context() context.Enter() op = constant_op.constant(1) context.Exit() self.assertIn(op.name, context._values) @test_util.build_as_function_and_v1_graph def test_op_is_in_context(self): """Tests that XLACompileContext is recognized as an XLA context.""" op1 = constant_op.constant(1) context = self.create_test_xla_compile_context() context.Enter() op2 = constant_op.constant(2) context.Exit() self.assertFalse(control_flow_util.IsInXLAContext(op1.op)) self.assertTrue(control_flow_util.IsInXLAContext(op2.op)) @test_util.build_as_function_and_v1_graph def test_op_prevent_feeding(self): """Tests that ops created inside XLACompileContext can not be fed.""" context = self.create_test_xla_compile_context() context.Enter() op = constant_op.constant(1) context.Exit() self.assertFalse(op.graph.is_feedable(op.op)) @test_util.build_as_function_and_v1_graph def test_op_prevent_fetching(self): 
"""Tests that ops created inside XLACompileContext can not be fetched.""" context = self.create_test_xla_compile_context() context.Enter() op = constant_op.constant(1) context.Exit() self.assertFalse(op.graph.is_fetchable(op.op)) class XlaCompileTest(test.TestCase): @test_util.run_v2_only def test_xla_compile_eager(self): """Tests that xla.compile raises proper exception when used eagerly.""" def computation(a, b): return a + b self.assertEqual(self.evaluate(xla.compile(computation, [1, 2])[0]), 3) def test_xla_compile_in_function(self): """Tests that xla.compile works in tf.function.""" @def_function.function def func_wrapper(a): def compute(a): return a + 1 return xla.compile(compute, [a]) self.assertEqual(self.evaluate(func_wrapper(1))[0], 2) def test_xla_compile_write_variable_in_function(self): """Tests that xla.compile works with variable in tf.function.""" a = variable_scope.get_variable( name='variable_a', use_resource=True, initializer=1) @def_function.function def func_wrapper(): def compute(): a.assign_add(1) a.assign_sub(2) return a.read_value() return xla.compile(compute) self.evaluate(a.initializer) self.assertEqual(self.evaluate(func_wrapper())[0], 0) class CheckFunctionArgumentCountTest(test.TestCase): def test_simple(self): """Tests that arg checker works for functions with no varargs or defaults. """ def func(x, y, z): return x + y + z self.assertEqual(None, xla.check_function_argument_count(func, 3, None)) self.assertEqual('exactly 3 arguments', xla.check_function_argument_count(func, 2, None)) queue = tpu_feed.InfeedQueue(2) self.assertEqual(None, xla.check_function_argument_count(func, 1, queue)) self.assertEqual('exactly 3 arguments', xla.check_function_argument_count(func, 2, queue)) def test_default_args(self): """Tests that arg checker works for a function with no varargs.""" def func(x, y, z=17): return x + y + z self.assertEqual(None, xla.check_function_argument_count(func, 3, None)) self.assertEqual(None, xla.check_function_argument_count(func, 2, None)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 1, None)) self.assertEqual('at most 3 arguments', xla.check_function_argument_count(func, 4, None)) queue = tpu_feed.InfeedQueue(1) self.assertEqual(None, xla.check_function_argument_count(func, 2, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 1, queue)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 0, queue)) self.assertEqual('at most 3 arguments', xla.check_function_argument_count(func, 4, queue)) def test_var_args(self): """Tests that arg checker works for a function with varargs.""" def func(x, y, *z): return x + y + len(z) self.assertEqual(None, xla.check_function_argument_count(func, 2, None)) self.assertEqual(None, xla.check_function_argument_count(func, 3, None)) self.assertEqual(None, xla.check_function_argument_count(func, 4, None)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 1, None)) queue = tpu_feed.InfeedQueue(1) self.assertEqual(None, xla.check_function_argument_count(func, 1, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 2, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 3, queue)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 0, queue)) def test_var_args_and_defaults(self): """Tests that arg checker works for a function with varargs and defaults.""" def func(x, y, z=17, *q): # pylint: disable=keyword-arg-before-vararg return x + y + z + 
len(q) self.assertEqual(None, xla.check_function_argument_count(func, 2, None)) self.assertEqual(None, xla.check_function_argument_count(func, 3, None)) self.assertEqual(None, xla.check_function_argument_count(func, 4, None)) self.assertEqual(None, xla.check_function_argument_count(func, 5, None)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 1, None)) queue = tpu_feed.InfeedQueue(1) self.assertEqual(None, xla.check_function_argument_count(func, 1, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 2, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 3, queue)) self.assertEqual(None, xla.check_function_argument_count(func, 4, queue)) self.assertEqual('at least 2 arguments', xla.check_function_argument_count(func, 0, queue)) if __name__ == '__main__': test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/xla/xla_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library for controlling the Tensorflow/XLA JIT compiler.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.eager import context from tensorflow.python.framework import ops from tensorflow.python.util.tf_export import tf_export _XLA_SCOPE_KEY = ("__xla_scope",) class _XlaScope(object): """Keeps track of previous XLA scope calls, and depth of current call.""" def __init__(self, count, depth): self.count = count self.depth = depth @contextlib.contextmanager @tf_export("xla.experimental.jit_scope") def experimental_jit_scope(compile_ops=True, separate_compiled_gradients=False): """Enable or disable JIT compilation of operators within the scope. NOTE: This is an experimental feature. The compilation is a hint and only supported on a best-effort basis. Example usage: with tf.xla.experimental.jit_scope(): c = tf.matmul(a, b) # compiled with tf.xla.experimental.jit_scope(compile_ops=False): d = tf.matmul(a, c) # not compiled with tf.xla.experimental.jit_scope( compile_ops=lambda node_def: 'matmul' in node_def.op.lower()): e = tf.matmul(a, b) + d # matmul is compiled, the addition is not. Example of separate_compiled_gradients: # In the example below, the computations for f, g and h will all be compiled # in separate scopes. with tf.xla.experimental.jit_scope( separate_compiled_gradients=True): f = tf.matmul(a, b) g = tf.gradients([f], [a, b], name='mygrads1') h = tf.gradients([f], [a, b], name='mygrads2') Args: compile_ops: Whether to enable or disable compilation in the scope. Either a Python bool, or a callable that accepts the parameter `node_def` and returns a python bool. separate_compiled_gradients: If true put each gradient subgraph into a separate compilation scope. This gives fine-grained control over which portions of the graph will be compiled as a single unit. Compiling gradients separately may yield better performance for some graphs. The scope is named based on the scope of the forward computation as well as the name of the gradients. As a result, the gradients will be compiled in a scope that is separate from both the forward computation, and from other gradients. Raises: RuntimeError: if called when eager execution is enabled. Yields: The current scope, enabling or disabling compilation. """ if context.executing_eagerly(): raise RuntimeError("xla.experimental.jit_scope is not supported when eager " "execution is enabled. 
Try use it inside tf.function.") if callable(compile_ops): def xla_compile(node_def): return attr_value_pb2.AttrValue(b=compile_ops(node_def)) else: xla_compile = attr_value_pb2.AttrValue(b=compile_ops) attrs = { "_XlaCompile": xla_compile, "_XlaSeparateCompiledGradients": attr_value_pb2.AttrValue(b=bool(separate_compiled_gradients)) } # Find the singleton counter for the current scoped graph. If it # doesn't exist, create one. xla_scope_counter = ops.get_collection(_XLA_SCOPE_KEY) if not xla_scope_counter: xla_scope_counter = _XlaScope(0, 0) ops.add_to_collection(_XLA_SCOPE_KEY, xla_scope_counter) else: xla_scope_counter = xla_scope_counter[0] if xla_scope_counter.depth == 0: # If we're at the root xla scope, we can increase the counter so # future calls to jit_scope use a different scope value. # If we're already within a scope, we'll be fusing using the scope # controlled by the parent. attrs["_XlaScope"] = attr_value_pb2.AttrValue( s=("jit_scope_%d" % xla_scope_counter.count).encode()) xla_scope_counter.count += 1 xla_scope_counter.depth += 1 # pylint: disable=protected-access with ops.get_default_graph()._attr_scope(attrs): yield # pylint: enable=protected-access xla_scope_counter.depth -= 1
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/xla/jit.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A module for controlling the Tensorflow/XLA JIT compiler.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import from tensorflow.python.compiler.xla import jit from tensorflow.python.compiler.xla import xla # pylint: enable=unused-import
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/xla/__init__.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for python.compiler.xla.jit.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from tensorflow.python.compiler.xla import jit from tensorflow.python.framework import constant_op from tensorflow.python.framework import function from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import test_util from tensorflow.python.ops import gradients from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test # pylint: enable=g-import-not-at-top _REGISTERED_OPS = op_def_registry.get_registered_ops() def enable_jit_nonstateful(node_def): try: return not _REGISTERED_OPS[node_def.op].is_stateful except KeyError: raise ValueError("Unregistered op being created: %s" % node_def) class JITTest(test.TestCase, parameterized.TestCase): def compute(self, use_jit, compute_fn): random_seed.set_random_seed(1234) with self.session(graph=ops.Graph()) as sess: with jit.experimental_jit_scope(use_jit): r = compute_fn() sess.run(variables.global_variables_initializer()) return (r, sess.run(r)) @test_util.run_v2_only def testJITInEager(self): with self.assertRaisesRegexp( RuntimeError, "xla.experimental.jit_scope is not supported when eager " "execution is enabled. 
Try use it inside tf.function."): with jit.experimental_jit_scope(True): constant_op.constant(1) @test_util.build_as_function_and_v1_graph def testJITCreateOpsLambda(self): """Test several ways of customizing the compilation attribute.""" def create_ops(): with variable_scope.variable_scope( "root", initializer=init_ops.random_uniform_initializer( -0.1, 0.1, seed=2)): inputs = random_ops.random_uniform((1,), seed=1) return inputs v_false_1_t, v_false_1 = self.compute(False, create_ops) _, v_false_2 = self.compute(False, create_ops) v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops) _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops) v_all_true_t, _ = self.compute(True, create_ops) self.assertFalse(v_false_1_t.op.get_attr("_XlaCompile")) v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name( "root/random_uniform/RandomUniform") v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name( "root/random_uniform/RandomUniform") self.assertFalse(v_true_1_t_sampler_op.get_attr("_XlaCompile")) self.assertTrue(v_all_true_t_sampler_op.get_attr("_XlaCompile")) self.assertTrue(v_true_1_t.op.get_attr("_XlaCompile")) self.assertTrue(v_all_true_t.op.get_attr("_XlaCompile")) # Additionally ensure that where no JIT compilation happens on the # random_uniform op, the output values are identical to the case # where no JIT compilation happens anywhere. self.assertAllClose(v_false_1, v_false_2) self.assertAllClose(v_true_1, v_true_2) self.assertAllClose(v_false_1, v_true_1) @test_util.build_as_function_and_v1_graph def testJITXlaScope(self): with self.session(graph=ops.Graph()): with jit.experimental_jit_scope(True): # XlaScope 0 a1 = constant_op.constant(1) with jit.experimental_jit_scope(True): # XlaScope 1 a2 = constant_op.constant(1) with jit.experimental_jit_scope(True): # XlaScope still 1, depth 1 a3 = constant_op.constant(1) with jit.experimental_jit_scope(True): # XlaScope still 1, depth 2 a4 = constant_op.constant(1) # XlaScope still 1, depth 1 a5 = constant_op.constant(1) with jit.experimental_jit_scope(True): # XlaScope now 2, depth 0 a6 = constant_op.constant(1) self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope")) @test_util.build_as_function_and_v1_graph def testJITVariableSeed(self): """Test that the stateful initializer is not marked for compilation. XLA does not currently support seeded initialization and XLA initializers therefore return different values than non-XLA counterparts. Here we ensure that if we can disable JIT compilation for the initializers and get the same variable values as if no JIT compilation happened. 
""" def create_ops(): with variable_scope.variable_scope( "root", initializer=init_ops.random_uniform_initializer( -0.1, 0.1, seed=2)): inputs = variable_scope.get_variable("var", (1,)) return inputs _, v_false_1 = self.compute(False, create_ops) _, v_false_2 = self.compute(False, create_ops) _, v_true_1 = self.compute(enable_jit_nonstateful, create_ops) _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops) self.assertAllClose(v_false_1, v_false_2) self.assertAllClose(v_true_1, v_true_2) self.assertAllClose(v_false_1, v_true_1) @test_util.build_as_function_and_v1_graph def testDefunNoJitScope(self): with self.session(graph=ops.Graph()): @function.Defun(compiled=True, noinline=True) def mulop(x1, x2): return x1 * x2 x = constant_op.constant(1.0) r = mulop(x, x) # Ensure the forward function is compiled. graph_def = r.graph.as_graph_def() func_attrs = graph_def.library.function[0].attr self.assertTrue(func_attrs["_XlaCompile"].b) # No enclosing jit scope so function sets its own value for _XlaScope. self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s) @test_util.build_as_function_and_v1_graph def testDefunInheritsJitScope(self): with self.session(graph=ops.Graph()): with jit.experimental_jit_scope(True): @function.Defun(compiled=True, noinline=True) def mulop(x1, x2): return x1 * x2 x = constant_op.constant(1.0) r = mulop(x, x) # Ensure the forward function is compiled. graph_def = r.graph.as_graph_def() func_attrs = graph_def.library.function[0].attr self.assertTrue(func_attrs["_XlaCompile"].b) # Ensure _XlaScope is inherited from enclosing context. self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s) class CompilationEnabledInGradientTest(test.TestCase, parameterized.TestCase): @test_util.build_as_function_and_v1_graph def testCompilationInGradient(self): with self.cached_session(): x = constant_op.constant([[3.]]) y_nc = math_ops.matmul(x, x, name="not_compiled") with jit.experimental_jit_scope(): y_c = math_ops.matmul(y_nc, y_nc, name="compiled") x_grads = gradients.gradients([y_c], [x])[0] operations = x.graph.get_operations() c_grad_ops = [ op for op in operations if "gradients/compiled" in op.name] nc_grad_ops = [ op for op in operations if "gradients/not_compiled" in op.name] self.assertGreater(len(c_grad_ops), 0) self.assertGreater(len(nc_grad_ops), 0) for cg in c_grad_ops: self.assertTrue(cg.get_attr("_XlaCompile")) for ncg in nc_grad_ops: with self.assertRaisesRegexp(ValueError, "[Nn]o attr named"): ncg.get_attr("_XlaCompile") # d/dx (x ** 4) = 4 * (x ** 3) self.assertAllClose([[108]], x_grads.eval()) @test_util.build_as_function_and_v1_graph def testCompilationGradientScopeNames(self): with self.session(graph=ops.Graph()): with jit.experimental_jit_scope(): # XlaScope 0 a1 = constant_op.constant([[1.]]) a1t = math_ops.matmul(a1, a1) with jit.experimental_jit_scope(): # XlaScope 1 a2 = constant_op.constant([[1.]]) a2t = math_ops.matmul(a2, a2) self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope")) grad_a1 = gradients.gradients(a1t, a1, name="GA")[0] grad_a2 = gradients.gradients(a2t, a2, name="GB")[0] grad_a1 = grad_a1.op.inputs[0] grad_a2 = grad_a2.op.inputs[0] self.assertTrue(grad_a1.op.get_attr("_XlaCompile")) self.assertTrue(grad_a2.op.get_attr("_XlaCompile")) self.assertEqual(b"jit_scope_0", grad_a1.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", grad_a2.op.get_attr("_XlaScope")) @test_util.build_as_function_and_v1_graph def 
testCompilationSeparateGradientScopeNames(self): with self.session(graph=ops.Graph()): with jit.experimental_jit_scope(True, separate_compiled_gradients=True): # XlaScope 0 a1 = constant_op.constant([[1.]]) a1t = math_ops.matmul(a1, a1) with jit.experimental_jit_scope(True, separate_compiled_gradients=True): # XlaScope 1 a2 = constant_op.constant([[1.]]) a2t = math_ops.matmul(a2, a2) self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope")) grad_a1 = gradients.gradients(a1t, a1, name="GA")[0] grad_a2 = gradients.gradients(a2t, a2, name="GB")[0] grad_a1 = grad_a1.op.inputs[0] grad_a2 = grad_a2.op.inputs[0] self.assertTrue(grad_a1.op.get_attr("_XlaCompile")) self.assertTrue(grad_a2.op.get_attr("_XlaCompile")) self.assertEqual(b"jit_scope_0_grad_GA", grad_a1.op.get_attr("_XlaScope")) self.assertEqual(b"jit_scope_1_grad_GB", grad_a2.op.get_attr("_XlaScope")) @test_util.build_as_function_and_v1_graph def testPlaysNicelyWithDefun(self): with self.session(graph=ops.Graph()) as sess: with jit.experimental_jit_scope(True): @function.Defun(compiled=True, noinline=True) def mulop(x1, x2): return x1 * x2 x = constant_op.constant(1.0) r = mulop(x, x) g_r = gradients.gradients(r, x, name="GA")[0] # Ensure the forward function is compiled. graph_def = r.graph.as_graph_def() func_attrs = graph_def.library.function[0].attr self.assertTrue(func_attrs["_XlaCompile"].b) self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s) # Ensure the gradient (SymbolicGradient) is compiled, with the same # _XlaScope as the function itself. grad_op = g_r.op.inputs[0].op self.assertTrue(grad_op.get_attr("_XlaCompile")) self.assertEqual(b"jit_scope_0", grad_op.get_attr("_XlaScope")) # Ensure the ops run: grad(x1*x1) = 2*x1 self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r])) @test_util.build_as_function_and_v1_graph def testPlaysNicelyWithDefunSeparateGradientScope(self): with self.session(graph=ops.Graph()) as sess: with jit.experimental_jit_scope(True): @function.Defun( compiled=True, noinline=True, separate_compiled_gradients=True) def mulop(x1, x2): return x1 * x2 x = constant_op.constant(1.0) r = mulop(x, x) g_r = gradients.gradients(r, x, name="GA")[0] # Ensure the forward function is compiled. graph_def = r.graph.as_graph_def() func_attrs = graph_def.library.function[0].attr self.assertTrue(func_attrs["_XlaCompile"].b) self.assertEqual(b"jit_scope_0", func_attrs["_XlaScope"].s) # Ensure the gradient (SymbolicGradient) is compiled, with a different # _XlaScope from the function itself. grad_op = g_r.op.inputs[0].op self.assertTrue(grad_op.get_attr("_XlaCompile")) self.assertEqual(b"jit_scope_0_grad_GA", grad_op.get_attr("_XlaScope")) # Ensure the ops run: grad(x1*x1) = 2*x1 self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r])) if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/xla/jit_test.py
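The tests above revolve around the `_XlaCompile` and `_XlaScope` op attributes that `jit.experimental_jit_scope` attaches. Below is a minimal sketch (not part of the corpus) of inspecting those attributes directly; it assumes a TF 1.x graph-mode build where `tensorflow.python.compiler.xla.jit` is importable and a fresh process, so the scope counter starts at 0:

```python
# Minimal sketch mirroring testJITXlaScope: two sibling jit scopes get
# sequentially numbered _XlaScope attributes, and ops created inside a
# compiling scope are marked with _XlaCompile. Assumes TF 1.x graph mode.
from tensorflow.python.compiler.xla import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops

with ops.Graph().as_default():
  with jit.experimental_jit_scope(True):   # first scope -> b"jit_scope_0"
    a = constant_op.constant(1)
  with jit.experimental_jit_scope(True):   # sibling scope -> b"jit_scope_1"
    b = constant_op.constant(1)

  assert a.op.get_attr("_XlaCompile")                  # marked for XLA
  assert a.op.get_attr("_XlaScope") == b"jit_scope_0"
  assert b.op.get_attr("_XlaScope") == b"jit_scope_1"
```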
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """xla is an experimental library that provides XLA support APIs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.compiler.jit.ops import xla_ops from tensorflow.compiler.jit.ops import xla_ops_grad # pylint: disable=unused-import from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.distribute import summary_op_util from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect from tensorflow.python.util.tf_export import tf_export _XLA_COMPILE_ATTR = '_xla_compile_id' _MAX_WARNING_LINES = 5 # Operations that indicate some error in the user's graph. For example, XLA # computation should not have any Placeholder op. _BLACKLISTED_OPS = set([ 'Placeholder', ]) # XLA doesn't currently support reading of intermediate tensors, thus some ops # are not supported. _UNSUPPORTED_OPS = set([ 'AudioSummary', 'AudioSummaryV2', 'HistogramSummary', 'ImageSummary', 'MergeSummary', 'Print', 'ScalarSummary', 'TensorSummary', 'TensorSummaryV2', ]) @tf_export('xla.experimental.compile') def compile(computation, inputs=None): # pylint: disable=redefined-builtin """Builds an operator that compiles and runs `computation` with XLA. NOTE: In eager mode, `computation` will have `@tf.function` semantics. Args: computation: A Python function that builds a computation to apply to the input. If the function takes n inputs, 'inputs' should be a list of n tensors. `computation` may return a list of operations and tensors. Tensors must come before operations in the returned list. The return value of `compile` is a list of tensors corresponding to the tensors from the output of `computation`. All `Operation`s returned from `computation` will be executed when evaluating any of the returned output tensors. inputs: A list of inputs or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single Rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. 
Exceptions include: 1) None output: a NoOp would be returned which control-depends on computation. 2) Single value output: A tuple containing the value would be returned. 3) Operation-only outputs: a NoOp would be returned which control-depends on computation. TODO(b/121383831): Investigate removing these special cases. Raises: RuntimeError: if called when eager execution is enabled. """ if context.executing_eagerly(): @def_function.function def xla_compile_wrapper(): return _compile_internal(computation, inputs) return xla_compile_wrapper() return _compile_internal(computation, inputs) class XLACompileContext(control_flow_ops.XLAControlFlowContext): """A `ControlFlowContext` for nodes inside an XLA computation cluster. THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NOT USE DIRECTLY. The primary role of `XLACompileContext` is to mark operators inside a xla.compile() computation with attribute "_xla_compile_id=XYZ", where XYZ is a unique name. `ControlFlowContext` is used to perform the annotation since it integrates with Tensorflow constructs like ResourceVariables. For example, if a `ResourceVariable` is constructed inside a xla.compile() block, the `ResourceVariable` implementation can use `with ops.control_dependencies(None)` to build the variable's definition outside the compiled computation. """ def __init__(self, name, pivot): """Builds a new XLACompileContext. Args: name: a unique name for the context, used to populate the `_xla_compile_id` attribute. pivot: a pivot node. Nodes in the XLACompileContext that do not have any inputs will have a control dependency on the pivot node. This ensures that nodes are correctly included in any enclosing control flow contexts. """ super(XLACompileContext, self).__init__() self._name = name self._name_as_bytes = compat.as_bytes(name) self._unsupported_ops = [] self._pivot = pivot def report_unsupported_operations(self): if self._unsupported_ops: op_str = '\n'.join([ ' %s (%s)' % (op.type, op.name) for op in self._unsupported_ops[:_MAX_WARNING_LINES] ]) logging.warning('%d unsupported operations found: \n%s', len(self._unsupported_ops), op_str) if len(self._unsupported_ops) > _MAX_WARNING_LINES: logging.warning('... and %d more', len(self._unsupported_ops) - _MAX_WARNING_LINES) def _RemoveExternalControlEdges(self, op): """Remove any external control dependency on this op.""" internal_control_inputs = [] external_control_inputs = [] for x in op.control_inputs: # pylint: disable=protected-access is_internal_op = False ctxt = x._get_control_flow_context() while ctxt is not None: if ctxt == self: is_internal_op = True break ctxt = ctxt._outer_context if is_internal_op: internal_control_inputs.append(x) else: external_control_inputs.append(x) # pylint: enable=protected-access # pylint: disable=protected-access op._remove_all_control_inputs() op._add_control_inputs(internal_control_inputs) # pylint: enable=protected-access return internal_control_inputs, external_control_inputs def AddOp(self, op): """Creates op in XLACompileContext and notifies outer context recursively.""" # pylint: disable=protected-access if op.type in _BLACKLISTED_OPS: logging.error( 'Operation of type %s (%s) is not supported in XLA. Execution will ' 'fail if this op is used in the graph. ', op.type, op.name) # TODO(ycao): Automatically disable summaries instead of reporting them. 
if op.type in _UNSUPPORTED_OPS: self._unsupported_ops.append(op) if any(x.dtype._is_ref_dtype for x in op.inputs): raise NotImplementedError( 'Non-resource Variables are not supported inside XLA computations ' '(operator name: %s)' % op.name) if _XLA_COMPILE_ATTR in op.node_def.attr: raise ValueError('XLA compiled computations cannot be nested (operator ' 'name: %s)' % op.name) op._set_attr( _XLA_COMPILE_ATTR, attr_value_pb2.AttrValue(s=self._name_as_bytes)) op.graph.prevent_feeding(op) op.graph.prevent_fetching(op) # Remove any control edges from outer control flow contexts. These may cause # mismatched frame errors. An example is when one of op's inputs is # generated in a different While control flow context. (internal_control_inputs, external_control_inputs) = self._RemoveExternalControlEdges(op) if not op.inputs: # Add a control edge from the control pivot to this op. if not internal_control_inputs: # pylint: disable=protected-access op._add_control_input(self._pivot) # pylint: enable=protected-access else: for index in xrange(len(op.inputs)): x = op.inputs[index] real_x = self.AddValue(x) if real_x is not x: op._update_input(index, real_x) # pylint: disable=protected-access if external_control_inputs: # Use an identity to pull control inputs as data inputs. Note that we # ignore ops which don't have outputs. TODO(phawkins): fix that. with ops.control_dependencies(None): self.Enter() external_control_inputs = [ array_ops.identity(x.outputs[0]).op for x in external_control_inputs if x.outputs ] self.Exit() # pylint: disable=protected-access op._add_control_inputs(external_control_inputs) # pylint: enable=protected-access # Mark op's outputs as seen by this context and any outer contexts. output_names = [x.name for x in op.outputs] context = self while context is not None: # pylint: disable=protected-access context._values.update(output_names) context = context._outer_context # pylint: enable=protected-access if self._outer_context: self._outer_context.AddInnerOp(op) def AddValue(self, val): """Add `val` to the current context and its outer context recursively.""" if val.name in self._values: # Use the real value if it comes from outer context. result = self._external_values.get(val.name) return val if result is None else result result = val self._values.add(val.name) if self._outer_context: result = self._outer_context.AddValue(val) self._values.add(result.name) self._external_values[val.name] = result return result def AddInnerOp(self, op): self.AddOp(op) if self._outer_context: self._outer_context.AddInnerOp(op) @property def grad_state(self): # Define the gradient loop state associated with the XLACompileContext to # be None as the XLACompileContext does not get nested nor does the # grad_state outside the XLACompileContext affect the graph inside so the # grad_state should be as if this is the top-level gradient state. return None @property def back_prop(self): """Forwards to the enclosing while context, if any.""" if self.GetWhileContext(): return self.GetWhileContext().back_prop return False def _compile_internal(computation, inputs=None): """Builds graph operators that compile and symbolically execute computation. Args: computation: A Python function that builds the computation to compile and execute. inputs: A list of inputs or `None` (equivalent to an empty list). Each input can be a nested structure containing values that are convertible to tensors. 
Note that passing an N-dimension list of compatible values will result in an N-dimension list of scalar tensors rather than a single Rank-N tensor. If you need different behavior, convert part of inputs to tensors with `tf.convert_to_tensor`. Returns: Same data structure as if computation(*inputs) is called directly with some exceptions for correctness. Exceptions include: 1) None output 2) Single value output 3) Operation-only outputs Raises: ValueError: If any element in computation outputs is neither an Operation nor a value that can be converted to a tensor. ValueError: If computation outputs is non-flat and contains any Operations. TypeError: If `inputs` is not a list or tuple. """ if inputs is None: inputs = [] if not isinstance(inputs, collections.Sequence): raise TypeError('inputs must be a list') # Flatten inputs. flat_inputs = nest.flatten(inputs) # Converts inputs to Tensors. flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs] cluster_name = ops.get_default_graph().unique_name('cluster') pivot = control_flow_ops.no_op(name=cluster_name + '/pivot') context = XLACompileContext(name=cluster_name, pivot=pivot) try: context.Enter() # Add identity ops so even unused inputs are 'consumed' by the # computation. flat_inputs = [ array_ops.identity(x, name='input_{}'.format(i)) for i, x in enumerate(flat_inputs) ] # Re-pack flat_inputs in same structure as 'inputs'. computation_inputs = nest.pack_sequence_as( structure=inputs, flat_sequence=flat_inputs) # Only resource variables work inside an XLA computation, so turn on # resource variables for the computation. vscope = variable_scope.get_variable_scope() saved_use_resource = vscope.use_resource vscope.set_use_resource(True) with _disable_summary_context(): outputs = computation(*computation_inputs) # Restore variable scope after computation. vscope.set_use_resource(saved_use_resource) outputs_is_flat = is_flat(outputs) if outputs_is_flat: output_tensors, control_deps = _postprocess_flat_outputs(outputs) else: output_tensors, control_deps = _postprocess_non_flat_outputs(outputs) context.ExitResult(output_tensors) finally: context.report_unsupported_operations() context.Exit() # When XLA computation returns only operations and no tensors, a NoOp # dependent on the operations in outputs is returned. Otherwise final # outputs would be empty and there is no way to trigger returned # operations. if not output_tensors: return control_flow_ops.group(control_deps, name='output_0') output_tensors = [ xla_ops.xla_cluster_output(o, name='output{}'.format(i)) for i, o in enumerate(output_tensors) ] with ops.control_dependencies(control_deps): # Wraps the outputs in identity operators that carry control # dependencies. output_tensors = [ array_ops.identity(o, name='output_%d' % i) for i, o in enumerate(output_tensors) ] # If `computation` returned non-flat output structure, pack output tensors # back into same structure. if not outputs_is_flat: output_tensors = nest.pack_sequence_as( structure=outputs, flat_sequence=output_tensors) return output_tensors def is_flat(outputs): """Checks if outputs is a flat structure. Following structures and values are considered flat: 1) None 2) A single object 3) A list or tuple of Tensors/Operations The only structures that this function understands are sequences and dictionaries. E.g. this means that if outputs contains a single user-defined Object, it is considered to be flat. Errors are raised later on if that Object cannot be converted to a Tensor. 
Args: outputs: Output from `computation` inside `xla.compile`. Returns: A boolean indicating whether outputs is flat. """ # If outputs is a list or tuple, check if it has any nested structure. If # there is, then outputs is non-flat. if isinstance(outputs, collections.Sequence): for o in outputs: if isinstance(o, collections.Sequence) or isinstance(o, dict): return False # If outputs is a dict, it is non-flat. if isinstance(outputs, dict): return False # Getting here means either outputs itself is a single non-structured value # or it is a flat list of single non-structured values. return True def _postprocess_flat_outputs(outputs): """Validates flat outputs and adds back device assignments. Args: outputs: Output from `computation` inside `xla.compile`. Returns: Tensors and Operations extracted from outputs. """ # The following code segment is to preserve legacy behavior. Previously we only # supported flat outputs and thus for consistency it was nice to convert even # single element into a tuple. But now that we support arbitrary output # structure, this is no longer necessary. # TODO(b/121383831): Migrate all legacy use cases and delete this special # case. # If the computation returns `None`, make it an empty tuple. if outputs is None: outputs = tuple() # If the computation only returned one value, make it a tuple. if not isinstance(outputs, collections.Sequence): outputs = (outputs,) # Append `no_op` here so that return value of this function always contains # at least one op that can trigger XlaLaunch node. outputs += (control_flow_ops.no_op(),) try: outputs = [ o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o) for o in outputs ] except Exception as e: raise ValueError( 'XLA computation function return values must all either be Operations' ' or convertible to Tensors. Got error: "%s"' % str(e)) # Separates the returned Operations and Tensors. output_operations = [o for o in outputs if isinstance(o, ops.Operation)] output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)] if outputs != output_tensors + output_operations: raise ValueError( 'XLA computation function must return zero or more Tensor values ' 'followed by zero or more Operations.') new_output_tensors = [] for t in output_tensors: with ops.device(t.device if t.device else ''): new_output_tensors.append(array_ops.identity(t)) return new_output_tensors, output_operations def _postprocess_non_flat_outputs(outputs): """Validates non-flat outputs and adds back device assignments. Args: outputs: Output from `computation` inside `xla.compile`. Returns: Tensors extracted from outputs and an empty list because Operations are not allowed in non-flat outputs. """ # Convert all non-Operation outputs to Tensors. new_output_tensors = [] for o in nest.flatten(outputs): if isinstance(o, ops.Operation): raise ValueError( 'xla.compile does not support Operation as return value in non-flat ' 'output structure. You can set returned Operations as control ' 'dependencies of returned Tensors so Operations are triggered when ' 'Tensors are evaluated. Operation found: "%s"' % o.name) try: o = ops.convert_to_tensor(o) except Exception as e: raise ValueError( 'XLA computation function return values must all either be ' 'Operations or convertible to Tensors. Got error: "%s"' % str(e)) # Makes sure even pass-through inputs/outputs are touched in compile # context by creating an Identity node inside compile context. 
with ops.device(o.device if o.device else ''): new_output_tensors.append(array_ops.identity(o)) return new_output_tensors, [] @contextlib.contextmanager def _disable_summary_context(): """Enters a context where all summary ops are skipped. Summaries are not yet supported in xla.compile(). So we provide this context manager that can skip creating summary ops. This is a temporary workaround due to XLA not supporting summary ops. Yields: None. """ original_skip_summary_func = summary_op_util.skip_summary summary_op_util.skip_summary = lambda: True try: yield finally: summary_op_util.skip_summary = original_skip_summary_func class _CapturedObject(object): """A placeholder to capture an object.""" def __init__(self): self._object = None def capture(self, o): if self._object: raise RuntimeError( 'InternalError: _CapturedObject can capture only once. Please file ' 'bug.') self._object = o def get(self): return self._object def _get_scaffold(captured_scaffold_fn): """Retrieves the Scaffold from `captured_scaffold_fn`.""" scaffold_fn = captured_scaffold_fn.get() if not scaffold_fn: return None scaffold = scaffold_fn() if scaffold is None: raise ValueError( 'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed') return scaffold def check_function_argument_count(func, input_arity, infeed_queue): """Validate the number of input arguments to an XLA function. Args: func: the Python function that will be called to generate the body of an XLA computation graph. input_arity: the number of explicit arguments supplied by the caller. infeed_queue: if not None, the infeed queue that will supply additional arguments to the function. Returns: None if function can be called with the supplied number of arguments, or an error string if it cannot. """ def format_error(complaint, quantity): return '%s %d argument%s' % (complaint, quantity, '' if quantity == 1 else 's') num_args_supplied = input_arity if infeed_queue is not None: num_args_supplied += infeed_queue.number_of_tuple_elements arg_spec = tf_inspect.getargspec(func) num_func_args = len(arg_spec.args) if arg_spec.defaults is None: num_func_defaults = 0 else: num_func_defaults = len(arg_spec.defaults) min_func_args = num_func_args - num_func_defaults if num_args_supplied < min_func_args: # The supplied number of arguments is not enough to call the function. if num_func_defaults == 0 and arg_spec.varargs is None: return format_error('exactly', num_func_args) else: return format_error('at least', min_func_args) if arg_spec.varargs is None and num_args_supplied > num_func_args: # The supplied number of arguments is too many to call the function. if num_func_defaults == 0: return format_error('exactly', num_func_args) else: return format_error('at most', num_func_args) # Reaching here means either # 1) There are varargs, func can accept any number of arguments greater than # the minimum. # 2) Number of supplied arguments falls in range of acceptable argument count # of func. return None
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/xla/xla.py
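For reference, here is a minimal usage sketch (not part of the corpus) of the `compile` API defined above, via its exported name `tf.xla.experimental.compile`. It assumes a TF 1.x-style session environment with XLA support; per the docstring, a single-value return comes back wrapped in a one-element list:

```python
# Minimal sketch of xla.compile / tf.xla.experimental.compile usage.
# Assumes a TF 1.x (graph-mode) environment.
import tensorflow.compat.v1 as tf

def computation(x, y):
  # Tensors must come before any returned Operations.
  return x + 2.0 * y

out = tf.xla.experimental.compile(
    computation, inputs=[tf.constant(1.0), tf.constant(2.0)])
with tf.Session() as sess:
  print(sess.run(out))  # single-value output is wrapped: [5.0]
```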
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Exposes the Python wrapper conversion to trt_graph.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os import platform import tempfile import six as _six from tensorflow.compiler.tf2tensorrt import wrap_py_utils from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.eager import wrap_function from tensorflow.python.framework import convert_to_constants from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.grappler import tf_optimizer from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.platform import tf_logging from tensorflow.python.saved_model import builder from tensorflow.python.saved_model import load from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import save from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import saver from tensorflow.python.training.tracking import tracking from tensorflow.python.util import nest from tensorflow.python.util.lazy_loader import LazyLoader # Lazily load the op, since it's not available in cpu-only builds. Importing # this at top will cause tests that import TF-TRT to fail when they're built # and run without CUDA/GPU. gen_trt_ops = LazyLoader( "gen_trt_ops", globals(), "tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops") # Register TRT ops in python, so that when users import this module they can # execute a TRT-converted graph without calling any of the methods in this # module. if wrap_py_utils.is_tensorrt_enabled(): if platform.system() == "Windows": raise RuntimeError("Windows platform is not supported") # This will call register_op_list() in # tensorflow/python/framework/op_def_registry.py, but it doesn't register # the op or the op kernel in C++ runtime. 
gen_trt_ops.trt_engine_op # pylint: disable=pointless-statement def _to_bytes(s): """Encode s if it is a sequence of chars.""" if isinstance(s, _six.text_type): return s.encode("utf-8", errors="surrogateescape") return s def _to_string(s): """Decode s if it is a sequence of bytes.""" if isinstance(s, _six.binary_type): return s.decode("utf-8") return s class TrtPrecisionMode(object): FP32 = "FP32" FP16 = "FP16" INT8 = "INT8" @staticmethod def supported_precision_modes(): precisions = [ TrtPrecisionMode.FP32, TrtPrecisionMode.FP16, TrtPrecisionMode.INT8 ] return precisions + [p.lower() for p in precisions] # Use a large enough number as the default max_workspace_size for TRT engines, # so it can produce reasonable performance results with the default. DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30 # TrtConversionParams encapsulates the parameters that are used for TF-TRT # conversion. TrtConversionParams = collections.namedtuple( "TrtConversionParams", [ # A template RewriterConfig proto used to create a TRT-enabled # RewriterConfig. If None, it will use a default one. "rewriter_config_template", # The maximum GPU temporary memory which the TRT engine can use at # execution time. This corresponds to the 'workspaceSize' parameter of # nvinfer1::IBuilder::setMaxWorkspaceSize(). "max_workspace_size_bytes", # One of TrtPrecisionMode.supported_precision_modes(). "precision_mode", # The minimum number of nodes required for a subgraph to be replaced by # TRTEngineOp. "minimum_segment_size", # Whether to generate dynamic TRT ops which will build the TRT network # and engine at run time. # This option should be set to True in TF 2.0. "is_dynamic_op", # Max number of cached TRT engines in dynamic TRT ops. If the number of # cached engines is already at max but none of them can serve the input, # the TRTEngineOp will fall back to run the TF function based on which # the TRTEngineOp is created. "maximum_cached_engines", # This argument is ignored if precision_mode is not INT8. If set to # True, a calibration graph will be created to calibrate the missing # ranges. The calibration graph must be converted to an inference graph # by running calibration with calibrate(). If set to False, quantization # nodes will be expected for every tensor in the graph (excluding those # which will be fused). If a range is missing, an error will occur. # Please note that accuracy may be negatively affected if there is a # mismatch between which tensors TRT quantizes and which tensors were # trained with fake quantization. "use_calibration", # Max size for the input batch. # This option is deprecated in TF 2.0. "max_batch_size", ]) DEFAULT_TRT_CONVERSION_PARAMS = TrtConversionParams( rewriter_config_template=None, max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES, precision_mode=TrtPrecisionMode.FP32, minimum_segment_size=3, is_dynamic_op=True, maximum_cached_engines=1, use_calibration=True, max_batch_size=1) _TRT_ENGINE_OP_NAME = "TRTEngineOp" def _check_conversion_params(conversion_params): """Validate the provided TrtConversionParams. Args: conversion_params: a TrtConversionParams instance. Raises: TypeError: if any of the parameters are of unexpected type. ValueError: if any of the parameters are of unexpected value. """ supported_precision_modes = TrtPrecisionMode.supported_precision_modes() if conversion_params.precision_mode not in supported_precision_modes: raise ValueError( ("precision mode '{}' is not supported." 
"It should be one of {}").format(conversion_params.precision_mode, supported_precision_modes)) def _check_trt_version_compatibility(): """Check compatibility of TensorRT version. Raises: RuntimeError: if the TensorRT library version is incompatible. """ compiled_version = wrap_py_utils.get_linked_tensorrt_version() loaded_version = wrap_py_utils.get_loaded_tensorrt_version() tf_logging.info("Linked TensorRT version: %s" % str(compiled_version)) tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version)) version_mismatch = False if loaded_version[0] < compiled_version[0]: tf_logging.error( "TensorRT version mismatch. Tensorflow was compiled against " + "TensorRT %s but library loaded from environment is TensorRT %s" % (".".join([str(x) for x in compiled_version]), ".".join([str(x) for x in loaded_version])) + ". Please make sure that correct version of TensorRT " + "is available in the system and added to ldconfig or LD_LIBRARY_PATH") raise RuntimeError("Incompatible TensorRT library version") for i in zip(loaded_version, compiled_version): if i[0] != i[1]: tf_logging.warn("TensorRT mismatch. Compiled against version " + "%s, but loaded %s. Things may not work" % (".".join([str(x) for x in compiled_version]), ".".join([str(x) for x in loaded_version]))) version_mismatch = True break if not version_mismatch: tf_logging.info("Running against TensorRT version %s" % ".".join([str(x) for x in loaded_version])) def get_tensorrt_rewriter_config(conversion_params, is_v2=False): """Returns a RewriterConfig proto for TRT transformation. Args: conversion_params: a TrtConversionParams instance. is_v2: whether we're getting a RewriterConfig for TF 2.0. Returns: A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler. Raises: TypeError: if any of the parameters are of unexpected type. ValueError: if any of the parameters are of unexpected value. """ if conversion_params.rewriter_config_template is not None and not isinstance( conversion_params.rewriter_config_template, rewriter_config_pb2.RewriterConfig): raise TypeError( "rewriter_config_template should be a RewriterConfig proto.") _check_conversion_params(conversion_params) rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig() if conversion_params.rewriter_config_template is None: # Layout optimizer may add Const nodes followed by Reshape nodes, thus we # need to run constant folding again. rewriter_config_with_trt.optimizers.extend( ["constfold", "layout", "constfold"]) rewriter_config_with_trt.meta_optimizer_iterations = ( rewriter_config_pb2.RewriterConfig.ONE) else: rewriter_config_with_trt.CopyFrom( conversion_params.rewriter_config_template) optimizer = rewriter_config_with_trt.custom_optimizers.add() # Add a constfold optimizer to cleanup the unused Const nodes. rewriter_config_with_trt.custom_optimizers.add().name = "constfold" optimizer.name = "TensorRTOptimizer" optimizer.parameter_map[ "minimum_segment_size"].i = conversion_params.minimum_segment_size optimizer.parameter_map[ "max_workspace_size_bytes"].i = conversion_params.max_workspace_size_bytes optimizer.parameter_map["precision_mode"].s = _to_bytes( conversion_params.precision_mode) optimizer.parameter_map[ "maximum_cached_engines"].i = conversion_params.maximum_cached_engines optimizer.parameter_map[ "use_calibration"].b = conversion_params.use_calibration if is_v2: # Static mode (building TRT engine without executing the op) is deprecated # in TF 2.0. See TrtGraphConverterV2 for more details. 
if not conversion_params.is_dynamic_op: raise ValueError("Option is_dynamic_op=False is not supported in TF 2.0, " "please set it to True instead.") optimizer.parameter_map["is_dynamic_op"].b = True else: optimizer.parameter_map[ "max_batch_size"].i = conversion_params.max_batch_size optimizer.parameter_map["is_dynamic_op"].b = conversion_params.is_dynamic_op return rewriter_config_with_trt # Remove all scope prefixes in the node name. In TF 2.0, the same concrete # function can be initialized multiple times with different prefixes, and # this will result in the same TRTEngineOp being initialized multiple times # with different cache and duplicate TRT engines. # TODO(laigd): this may be caused by the fact that TRTEngineOp is not # stateful, need to investigate. # TODO(laigd): we rely on the fact that all functions are fully inlined # before TF-TRT optimizer is called, as otherwise it may generate the same # name when optimizing a different function graph. Fix this. def _get_canonical_engine_name(name): return name.split("/")[-1] class TrtGraphConverter(object): """A converter for TF-TRT transformation for TF 1.x GraphDef/SavedModels. To run the conversion without quantization calibration (e.g. for FP32/FP16 precision modes): ```python converter = TrtGraphConverter( input_saved_model_dir="my_dir", precision_mode=TrtPrecisionMode.FP16) converted_graph_def = converter.convert() converter.save(output_saved_model_dir) ``` To run the conversion with quantization calibration: ```python converter = TrtGraphConverter( input_saved_model_dir="my_dir", precision_mode=TrtPrecisionMode.INT8) converter.convert() # Run calibration 10 times. converted_graph_def = converter.calibrate( fetch_names=['output:0'], num_runs=10, feed_dict_fn=lambda: {'input:0': my_next_data()}) converter.save(output_saved_model_dir) ``` """ def __init__(self, input_saved_model_dir=None, input_saved_model_tags=None, input_saved_model_signature_key=None, input_graph_def=None, nodes_blacklist=None, session_config=None, max_batch_size=1, max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES, precision_mode=TrtPrecisionMode.FP32, minimum_segment_size=3, is_dynamic_op=False, maximum_cached_engines=1, use_calibration=True): """Initialize the converter. Args: input_saved_model_dir: the directory to load the SavedModel which contains the input graph to transform. Used only when input_graph_def is None. input_saved_model_tags: list of tags to load the SavedModel. input_saved_model_signature_key: the key of the signature to optimize the graph for. input_graph_def: a GraphDef object containing a model to be transformed. If set to None, the graph will be read from the SavedModel loaded from input_saved_model_dir. nodes_blacklist: list of node names to prevent the converter from touching. session_config: the ConfigProto used to create a Session. It's also used as a template to create a TRT-enabled ConfigProto for conversion. If not specified, a default ConfigProto will be used. max_batch_size: max size for the input batch. max_workspace_size_bytes: the maximum GPU temporary memory which the TRT engine can use at execution time. This corresponds to the 'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize(). precision_mode: one of TrtPrecisionMode.supported_precision_modes(). minimum_segment_size: the minimum number of nodes required for a subgraph to be replaced by TRTEngineOp. is_dynamic_op: whether to generate dynamic TRT ops which will build the TRT network and engine at run time. 
maximum_cached_engines: max number of cached TRT engines in dynamic TRT ops. If the number of cached engines is already at max but none of them can serve the input, the TRTEngineOp will fall back to run the TF function based on which the TRTEngineOp is created. use_calibration: this argument is ignored if precision_mode is not INT8. If set to True, a calibration graph will be created to calibrate the missing ranges. The calibration graph must be converted to an inference graph by running calibration with calibrate(). If set to False, quantization nodes will be expected for every tensor in the graph (excluding those which will be fused). If a range is missing, an error will occur. Please note that accuracy may be negatively affected if there is a mismatch between which tensors TRT quantizes and which tensors were trained with fake quantization. Raises: ValueError: if the combination of the parameters is invalid. RuntimeError: if this class is used in TF 2.0. """ if context.executing_eagerly(): raise RuntimeError("Please use TrtGraphConverterV2 in TF 2.0.") if input_graph_def and input_saved_model_dir: raise ValueError( "Can only specify one of input_graph_def and input_saved_model_dir") if not input_graph_def and not input_saved_model_dir: raise ValueError("Must specify one of input_graph_def and " "input_saved_model_dir") _check_trt_version_compatibility() self._input_graph_def = input_graph_def self._nodes_blacklist = nodes_blacklist self._input_saved_model_dir = input_saved_model_dir self._converted = False self._grappler_meta_graph_def = None self._input_saved_model_tags = ( input_saved_model_tags or [tag_constants.SERVING]) self._input_saved_model_signature_key = ( input_saved_model_signature_key or signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY) self._session_config = session_config or config_pb2.ConfigProto() # For calibration usage. self._calibration_graph = None self._calibration_data_collected = False self._need_calibration = ( precision_mode == TrtPrecisionMode.INT8 and use_calibration) if self._need_calibration and not is_dynamic_op: tf_logging.warn( "INT8 precision mode with calibration is supported with " "dynamic TRT ops only. Disregarding is_dynamic_op parameter.") is_dynamic_op = True # TODO(laigd): # - Verify in int8 mode that maximum_cached_engines is set properly. # - If it fails to build the int8 engine it should return an error. rewriter_config_template = None if (session_config and session_config.HasField("graph_options") and session_config.graph_options.HasField("rewrite_options")): rewriter_config_template = session_config.graph_options.rewrite_options self._conversion_params = TrtConversionParams( rewriter_config_template=rewriter_config_template, max_workspace_size_bytes=max_workspace_size_bytes, precision_mode=precision_mode, minimum_segment_size=minimum_segment_size, is_dynamic_op=is_dynamic_op, maximum_cached_engines=maximum_cached_engines, use_calibration=use_calibration, max_batch_size=max_batch_size) _check_conversion_params(self._conversion_params) def _run_conversion(self): """Run Grappler's OptimizeGraph() tool to convert the graph.""" # Create custom ConfigProto for Grappler. grappler_session_config = config_pb2.ConfigProto() grappler_session_config.CopyFrom(self._session_config) custom_rewriter_config = get_tensorrt_rewriter_config( conversion_params=self._conversion_params) grappler_session_config.graph_options.rewrite_options.CopyFrom( custom_rewriter_config) # Run Grappler. 
self._converted_graph_def = tf_optimizer.OptimizeGraph( grappler_session_config, self._grappler_meta_graph_def, graph_id=b"tf_graph") self._converted = True def _add_nodes_blacklist(self): if self._nodes_blacklist: collection_def = self._grappler_meta_graph_def.collection_def["train_op"] blacklist = collection_def.node_list.value for i in self._nodes_blacklist: if isinstance(i, ops.Tensor): blacklist.append(_to_bytes(i.name)) else: blacklist.append(_to_bytes(i)) def _convert_graph_def(self): """Convert the input GraphDef.""" graph = ops.Graph() with graph.as_default(): importer.import_graph_def(self._input_graph_def, name="") self._grappler_meta_graph_def = saver.export_meta_graph( graph_def=graph.as_graph_def(add_shapes=True), graph=graph) self._add_nodes_blacklist() self._run_conversion() def _collections_to_keep(self, collection_keys): # TODO(laigd): currently we use the collection key to filter out # collections that depend on variable ops, but this may miss some # other user-defined collections. A better way would be to use # CollectionDef::NodeList for the filtering. collections_to_remove = ( ops.GraphKeys._VARIABLE_COLLECTIONS + [ ops.GraphKeys.TRAIN_OP, ops.GraphKeys.WHILE_CONTEXT, ops.GraphKeys.COND_CONTEXT ]) return [key for key in collection_keys if key not in collections_to_remove] def _convert_saved_model(self): """Convert the input SavedModel.""" graph = ops.Graph() with session.Session(graph=graph, config=self._session_config) as sess: input_meta_graph_def = loader.load(sess, self._input_saved_model_tags, self._input_saved_model_dir) input_signature_def = input_meta_graph_def.signature_def[ self._input_saved_model_signature_key] def _gather_names(tensor_info): """Get the node names from a TensorInfo.""" return set([tensor_info[key].name.split(":")[0] for key in tensor_info]) # Get input and outputs from all SignatureDef. output_node_names = _gather_names(input_signature_def.inputs).union( _gather_names(input_signature_def.outputs)) # Preserve nodes in collection for collection_key in self._collections_to_keep( input_meta_graph_def.collection_def): for op in sess.graph.get_collection(collection_key): if isinstance(op, ops.Operation): output_node_names.add(op.name.split(":")[0]) # Freeze the variables in the SavedModel graph and copy the frozen # graph over. frozen_graph_def = graph_util.convert_variables_to_constants( sess, sess.graph.as_graph_def(add_shapes=True), list(output_node_names)) self._grappler_meta_graph_def = meta_graph_pb2.MetaGraphDef() self._grappler_meta_graph_def.graph_def.CopyFrom(frozen_graph_def) # Copy the collections that are not variables. for collection_key in self._collections_to_keep( input_meta_graph_def.collection_def): self._grappler_meta_graph_def.collection_def[collection_key].CopyFrom( input_meta_graph_def.collection_def[collection_key]) self._add_nodes_blacklist() # Copy other information. self._grappler_meta_graph_def.meta_info_def.CopyFrom( input_meta_graph_def.meta_info_def) self._grappler_meta_graph_def.signature_def[ self._input_saved_model_signature_key].CopyFrom(input_signature_def) # TODO(laigd): maybe add back AssetFileDef. self._run_conversion() def convert(self): """Run the TF-TRT conversion. Returns: The converted GraphDef for TF 1.x. """ assert not self._converted if self._input_graph_def: self._convert_graph_def() else: self._convert_saved_model() return self._converted_graph_def def calibrate(self, fetch_names, num_runs, feed_dict_fn=None, input_map_fn=None): """Run the calibration and return the calibrated GraphDef. 
Args: fetch_names: a list of output tensor names to fetch during calibration. num_runs: number of runs of the graph during calibration. feed_dict_fn: a function that returns a dictionary mapping input names (as strings) in the GraphDef to be calibrated to values (e.g. Python list, numpy arrays, etc). One and only one of `feed_dict_fn` and `input_map_fn` should be specified. input_map_fn: a function that returns a dictionary mapping input names (as strings) in the GraphDef to be calibrated to Tensor objects. The values of the named input tensors in the GraphDef to be calibrated will be re-mapped to the respective `Tensor` values during calibration. One and only one of `feed_dict_fn` and `input_map_fn` should be specified. Raises: ValueError: if the input combination is invalid. RuntimeError: if this method is called in eager mode. Returns: The GraphDef after the calibration. """ assert self._converted assert self._need_calibration assert not self._calibration_data_collected if (feed_dict_fn and input_map_fn) or (not feed_dict_fn and not input_map_fn): raise ValueError( "Should specify one and only one of feed_dict_fn and input_map_fn.") if input_map_fn: for k, v in input_map_fn().items(): if not isinstance(k, str): raise ValueError("Keys of input_map_fn must be of type str") if not isinstance(v, ops.Tensor): raise ValueError("Values of input_map_fn must be of type tf.Tensor") self._calibration_graph = ops.Graph() with self._calibration_graph.as_default(): fetches = importer.import_graph_def( self._converted_graph_def, input_map=input_map_fn() if input_map_fn else None, return_elements=fetch_names, name="") with session.Session( graph=self._calibration_graph, config=self._session_config) as calibration_sess: for _ in range(num_runs): calibration_sess.run( fetches, feed_dict=feed_dict_fn() if feed_dict_fn else None) # Maps device name to the corresponding get_calibration_data. # # TODO(laigd): a better way would be to use calibration_sess to list # all the devices, add one get_calibration_data for each device, and # fetch each such op for every resource until it's found. This can work # even when the device of the TRTEngineOp is empty or not fully specified. device_to_get_resource_op_map = {} with self._calibration_graph.as_default(): resource_name_input = array_ops.placeholder(dtypes.string) for node in self._converted_graph_def.node: if node.op == _TRT_ENGINE_OP_NAME: # Adds the get_calibration_data op for the device if not done # before. We only add one such op for each device. # TODO(laigd): What if the device is empty????? if node.device not in device_to_get_resource_op_map: with self._calibration_graph.device(node.device): serialized_resources_output = ( gen_trt_ops.get_calibration_data_op(resource_name_input)) device_to_get_resource_op_map[node.device] = ( serialized_resources_output) # Get the calibration resource. calibration_result = calibration_sess.run( device_to_get_resource_op_map[node.device], feed_dict={ resource_name_input: _get_canonical_engine_name(node.name) }) node.attr["calibration_data"].s = calibration_result self._calibration_data_collected = True return self._converted_graph_def def save(self, output_saved_model_dir): """Save the converted graph as a SavedModel. Args: output_saved_model_dir: construct a SavedModel using the converted GraphDef and save it to the specified directory. This option only works when the input graph is loaded from a SavedModel, i.e. when input_saved_model_dir is specified and input_graph_def is None in __init__(). 
Raises: ValueError: if the input to the converter is a GraphDef instead of a SavedModel. """ assert self._converted if self._need_calibration: assert self._calibration_data_collected if self._input_graph_def: raise ValueError( "Not able to save to a SavedModel since input is a GraphDef") def _restore_collections(dest_graph, src_meta_graph_def, collection_keys): """Restores collections that we need to keep.""" scope = "" for key in collection_keys: collection_def = src_meta_graph_def.collection_def[key] kind = collection_def.WhichOneof("kind") if kind is None: tf_logging.error( "Cannot identify data type for collection %s. Skipping.", key) continue from_proto = ops.get_from_proto_function(key) if from_proto and kind == "bytes_list": proto_type = ops.get_collection_proto_type(key) # It is assumed that there are no Variable keys in collections for value in collection_def.bytes_list.value: proto = proto_type() proto.ParseFromString(value) try: new_value = from_proto(proto, import_scope=scope) except: continue dest_graph.add_to_collection(key, new_value) else: field = getattr(collection_def, kind) if kind == "node_list": for value in field.value: name = ops.prepend_name_scope(value, scope) # Since the graph has been optimized, the node may no longer # exist try: col_op = dest_graph.as_graph_element(name) except (TypeError, ValueError, KeyError) as e: continue dest_graph.add_to_collection(key, col_op) elif kind == "int64_list": # NOTE(opensource): This force conversion is to work around the # fact that Python2 distinguishes between int and long, while # Python3 has only int. for value in field.value: dest_graph.add_to_collection(key, int(value)) else: for value in field.value: dest_graph.add_to_collection(key, ops.prepend_name_scope(value, scope)) # Write the transformed graphdef as SavedModel. saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir) with ops.Graph().as_default(): importer.import_graph_def(self._converted_graph_def, name="") _restore_collections( ops.get_default_graph(), self._grappler_meta_graph_def, self._collections_to_keep( self._grappler_meta_graph_def.collection_def)) # We don't use any specific converter here. with session.Session(config=self._session_config) as sess: saved_model_builder.add_meta_graph_and_variables( sess, self._input_saved_model_tags, signature_def_map=self._grappler_meta_graph_def.signature_def) # Ignore other meta graphs from the input SavedModel. saved_model_builder.save() def _get_resource_handle(name, device): with ops.device(device): return gen_trt_ops.create_trt_resource_handle(resource_name=name) class _TRTEngineResourceDeleter(tracking.CapturableResourceDeleter): """Resource deleter for destroying TRT engine cache resource.""" def __init__(self, resource_name, device): super(_TRTEngineResourceDeleter, self).__init__() self._resource_name = resource_name self._device = device def destroy_resource(self): handle = _get_resource_handle(self._resource_name, self._device) with ops.device(self._device): gen_resource_variable_ops.destroy_resource_op( handle, ignore_lookup_error=True) class _TRTEngineResource(tracking.TrackableResource): """Class to track the serialized engines resource.""" def __init__(self, resource_name, filename, maximum_cached_engines, device="GPU"): super(_TRTEngineResource, self).__init__( device=device, deleter=_TRTEngineResourceDeleter(resource_name, device)) self._resource_name = resource_name # Track the serialized engine file in the SavedModel. 
self._filename = self._track_trackable( tracking.TrackableAsset(filename), "_serialized_trt_resource_filename") self._maximum_cached_engines = maximum_cached_engines def _create_resource(self): return _get_resource_handle(self._resource_name, self._resource_device) def _initialize(self): gen_trt_ops.initialize_trt_resource( self.resource_handle, self._filename, max_cached_engines_count=self._maximum_cached_engines) class TrtGraphConverterV2(object): """An offline converter for TF-TRT transformation for TF 2.0 SavedModels. Note that in V2, is_dynamic_op=False is not supported, meaning TRT engines will be built only when the corresponding TRTEngineOp is executed. But we still provide a way to avoid the cost of building TRT engines during inference (see more below). There are several ways to run the conversion: 1. FP32/FP16 precision ```python params = DEFAULT_TRT_CONVERSION_PARAMS._replace( precision_mode='FP16') converter = TrtGraphConverterV2( input_saved_model_dir="my_dir", conversion_params=params) converter.convert() converter.save(output_saved_model_dir) ``` In this case, no TRT engines will be built or saved in the converted SavedModel. But if input data is available during conversion, we can still build and save the TRT engines to reduce the cost during inference (see option 2 below). 2. FP32/FP16 precision with pre-built engines ```python params = DEFAULT_TRT_CONVERSION_PARAMS._replace( precision_mode='FP16', # Set this to a large enough number so it can cache all the engines. maximum_cached_engines=16) converter = TrtGraphConverterV2( input_saved_model_dir="my_dir", conversion_params=params) converter.convert() # Define a generator function that yields input data, and use it to execute # the graph to build TRT engines. # With TensorRT 5.1, different engines will be built (and saved later) for # different input shapes to the TRTEngineOp. def my_input_fn(): for _ in range(num_runs): inp1, inp2 = ... yield inp1, inp2 converter.build(input_fn=my_input_fn) # Generate corresponding TRT engines converter.save(output_saved_model_dir) # Generated engines will be saved. ``` In this way, one engine will be built/saved for each unique input shape of the TRTEngineOp. This is good for applications that cannot afford building engines during inference but have access to input data that is similar to the one used in production (for example, that has the same input shapes). Also, the generated TRT engines are platform dependent, so we need to run `build()` in an environment that is similar to production (e.g. with same type of GPU). 3. INT8 precision and calibration with pre-built engines ```python params = DEFAULT_TRT_CONVERSION_PARAMS._replace( precision_mode='INT8', # Currently only one INT8 engine is supported in this mode. maximum_cached_engines=1, use_calibration=True) converter = TrtGraphConverterV2( input_saved_model_dir="my_dir", conversion_params=params) # Define a generator function that yields input data, and run INT8 # calibration with the data. All input data should have the same shape. # At the end of convert(), the calibration stats (e.g. range information) # will be saved and can be used to generate more TRT engines with different # shapes. Also, one TRT engine will be generated (with the same shape as # the calibration data) to be saved later. def my_calibration_input_fn(): for _ in range(num_runs): inp1, inp2 = ... 
yield inp1, inp2 converter.convert(calibration_input_fn=my_calibration_input_fn) # (Optional) Generate more TRT engines offline (same as the previous # option), to avoid the cost of generating them during inference. def my_input_fn(): for _ in range(num_runs): inp1, inp2 = ... yield inp1, inp2 converter.build(input_fn=my_input_fn) # Save the converted model and the engines. converter.save(output_saved_model_dir) ``` """ def __init__(self, input_saved_model_dir=None, input_saved_model_tags=None, input_saved_model_signature_key=None, conversion_params=DEFAULT_TRT_CONVERSION_PARAMS): """Initialize the converter. Args: input_saved_model_dir: the directory to load the SavedModel which contains the input graph to transform. Used only when input_graph_def is None. input_saved_model_tags: list of tags to load the SavedModel. input_saved_model_signature_key: the key of the signature to optimize the graph for. conversion_params: a TrtConversionParams instance. Raises: ValueError: if the combination of the parameters is invalid. """ assert context.executing_eagerly() _check_trt_version_compatibility() _check_conversion_params(conversion_params) self._conversion_params = conversion_params self._input_saved_model_dir = input_saved_model_dir self._input_saved_model_tags = ( input_saved_model_tags or [tag_constants.SERVING]) self._input_saved_model_signature_key = ( input_saved_model_signature_key or signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY) self._need_calibration = ( conversion_params.precision_mode == TrtPrecisionMode.INT8 and conversion_params.use_calibration) if (self._need_calibration and not conversion_params.is_dynamic_op): raise ValueError("INT8 precision mode with calibration is not supported " "with static TensorRT ops. Set is_dynamic_op to True.") self._converted = False def _run_conversion(self, meta_graph_def): """Run Grappler's OptimizeGraph() tool to convert the graph. Args: meta_graph_def: the MetaGraphDef instance to run the optimizations on. Returns: The optimized GraphDef. """ rewriter_config = get_tensorrt_rewriter_config( conversion_params=self._conversion_params, is_v2=True) grappler_session_config = config_pb2.ConfigProto() grappler_session_config.graph_options.rewrite_options.CopyFrom( rewriter_config) return tf_optimizer.OptimizeGraph( grappler_session_config, meta_graph_def, graph_id=b"tf_graph") def _for_each_trt_node(self, graph_def, fn): """Helper method to manipulate all TRTEngineOps in a GraphDef.""" for node in graph_def.node: if node.op == _TRT_ENGINE_OP_NAME: fn(node) for func in graph_def.library.function: for node in func.node_def: if node.op == _TRT_ENGINE_OP_NAME: fn(node) # TODO(laigd): provide a utility function to optimize a ConcreteFunction and # use it here (b/124792963). def convert(self, calibration_input_fn=None): """Convert the input SavedModel in 2.0 format. Args: calibration_input_fn: a generator function that yields input data as a list or tuple, which will be used to execute the converted signature for calibration. All the returned input data should have the same shape. Example: ``` def input_fn(): yield input1, input2, input3 ``` Raises: ValueError: if the input combination is invalid. Returns: The TF-TRT converted Function. 
""" assert not self._converted if (self._need_calibration and not calibration_input_fn): raise ValueError("Should specify calibration_input_fn because INT8 " "calibration is needed") if (not self._need_calibration and calibration_input_fn): raise ValueError("Should not specify calibration_input_fn because INT8 " "calibration is not needed") self._saved_model = load.load(self._input_saved_model_dir, self._input_saved_model_tags) func = self._saved_model.signatures[self._input_saved_model_signature_key] frozen_func = convert_to_constants.convert_variables_to_constants_v2(func) grappler_meta_graph_def = saver.export_meta_graph( graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph) # Add a collection 'train_op' so that Grappler knows the outputs. fetch_collection = meta_graph_pb2.CollectionDef() for array in frozen_func.inputs + frozen_func.outputs: fetch_collection.node_list.value.append(array.name) grappler_meta_graph_def.collection_def["train_op"].CopyFrom( fetch_collection) # Run TRT optimizer in Grappler to convert the graph. self._converted_graph_def = self._run_conversion(grappler_meta_graph_def) self._converted_func = wrap_function.function_from_graph_def( self._converted_graph_def, [tensor.name for tensor in frozen_func.inputs], [tensor.name for tensor in frozen_func.outputs]) # Reconstruct the output signatures using the ones from original model. self._converted_func.graph.structured_outputs = nest.pack_sequence_as( func.graph.structured_outputs, self._converted_func.graph.structured_outputs) if self._need_calibration: for inp in calibration_input_fn(): self._converted_func(*map(ops.convert_to_tensor, inp)) def _save_calibration_table(node): calibration_table = gen_trt_ops.get_calibration_data_op( _get_canonical_engine_name(node.name)) node.attr["calibration_data"].s = calibration_table.numpy() self._for_each_trt_node(self._converted_graph_def, _save_calibration_table) # Rebuild the function since calibration has changed the graph. calibrated_func = wrap_function.function_from_graph_def( self._converted_graph_def, [tensor.name for tensor in self._converted_func.inputs], [tensor.name for tensor in self._converted_func.outputs]) calibrated_func.graph.structured_outputs = nest.pack_sequence_as( self._converted_func.graph.structured_outputs, calibrated_func.graph.structured_outputs) self._converted_func = calibrated_func self._converted = True def build(self, input_fn): """Run inference with converted graph in order to build TensorRT engines. Args: input_fn: a generator function that yields input data as a list or tuple, which will be used to execute the converted signature to generate TRT engines. Example: ``` def input_fn(): yield input1, input2, input3 ``` """ for inp in input_fn(): self._converted_func(*map(ops.convert_to_tensor, inp)) def save(self, output_saved_model_dir): """Save the converted SavedModel. Args: output_saved_model_dir: directory to saved the converted SavedModel. """ assert self._converted # Serialize the TRT engines in the cache if any, and create trackable # resource to track them. engine_asset_dir = tempfile.mkdtemp() resource_map = {} def _serialize_and_track_engine(node): """Serialize TRT engines in the cache and track them.""" # Don't dump the same cache twice. canonical_engine_name = _get_canonical_engine_name(node.name) if canonical_engine_name in resource_map: return filename = os.path.join(engine_asset_dir, "trt-serialized-engine." 
      try:
        gen_trt_ops.serialize_trt_resource(
            resource_name=canonical_engine_name,
            filename=filename,
            delete_resource=True)
      except errors.NotFoundError:
        # If the user hasn't run the function to populate the engine, that's
        # fine, and we don't need to track any serialized TRT engines.
        return

      # TODO(laigd): add an option for the user to choose the device.
      resource_map[canonical_engine_name] = _TRTEngineResource(
          canonical_engine_name, filename,
          self._conversion_params.maximum_cached_engines)

    self._for_each_trt_node(self._converted_graph_def,
                            _serialize_and_track_engine)
    self._saved_model.trt_engine_resources = resource_map

    # Rewrite the signature map using the optimized ConcreteFunction.
    signatures = {
        key: value for key, value in self._saved_model.signatures.items()
    }
    signatures[self._input_saved_model_signature_key] = self._converted_func

    save.save(self._saved_model, output_saved_model_dir, signatures)


# TODO(laigd): use TrtConversionParams here.
def create_inference_graph(
    input_graph_def,
    outputs,
    max_batch_size=1,
    max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
    precision_mode=TrtPrecisionMode.FP32,
    minimum_segment_size=3,
    is_dynamic_op=False,
    maximum_cached_engines=1,
    input_saved_model_dir=None,
    input_saved_model_tags=None,
    input_saved_model_signature_key=None,
    output_saved_model_dir=None,
    session_config=None):
  """Python wrapper for the TRT transformation.

  Args:
    input_graph_def: a GraphDef object containing a model to be transformed.
      If set to None, the graph will be read from the SavedModel loaded from
      input_saved_model_dir.
    outputs: list of tensors or node names for the model outputs. Only used
      when input_graph_def is not None.
    max_batch_size: max size for the input batch.
    max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
      engine can use at execution time. This corresponds to the
      'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
    precision_mode: one of TrtPrecisionMode.supported_precision_modes().
    minimum_segment_size: the minimum number of nodes required for a subgraph
      to be replaced by TRTEngineOp.
    is_dynamic_op: whether to generate dynamic TRT ops which will build the
      TRT network and engine at run time.
    maximum_cached_engines: max number of cached TRT engines in dynamic TRT
      ops. If the number of cached engines is already at max but none of them
      can serve the input, the TRTEngineOp will fall back to run the TF
      function based on which the TRTEngineOp is created.
    input_saved_model_dir: the directory to load the SavedModel which contains
      the input graph to transform. Used only when input_graph_def is None.
    input_saved_model_tags: list of tags to load the SavedModel.
    input_saved_model_signature_key: the key of the signature to optimize the
      graph for.
    output_saved_model_dir: if not None, construct a SavedModel using the
      returned GraphDef and save it to the specified directory. This option
      only works when the input graph is loaded from a SavedModel, i.e. when
      input_saved_model_dir is specified and input_graph_def is None.
    session_config: the ConfigProto used to create a Session. It's also used
      as a template to create a TRT-enabled ConfigProto for conversion. If not
      specified, a default ConfigProto will be used.

  Returns:
    A GraphDef transformed from input_graph_def (or the SavedModel graph def
    loaded from input_saved_model_dir, if input_graph_def is not present),
    where all TRT compatible subgraphs are replaced with TRTEngineOps, and a
    TF function is added for each of the subgraphs.
    If is_dynamic_op is True, each TRTEngineOp will contain a serialized
    subgraph GraphDef, which will be converted to a TRT engine at execution
    time and the TRT engine will be cached for future usage. A new TRT engine
    will be created whenever none of the cached engines match the input
    shapes. If it fails to execute the TRT engine or the number of cached
    engines reaches maximum_cached_engines, the op will fall back to call the
    corresponding TF function.

    If is_dynamic_op is False, each TRTEngineOp will contain a serialized TRT
    engine created from the corresponding subgraph. No more engines will be
    created on the fly, and the op will fall back to call the corresponding TF
    function when it fails to execute the engine.

  Raises:
    ValueError: if the combination of the parameters is invalid.
  """
  trt_converter = TrtGraphConverter(
      input_saved_model_dir=input_saved_model_dir,
      input_saved_model_tags=input_saved_model_tags,
      input_saved_model_signature_key=input_saved_model_signature_key,
      input_graph_def=input_graph_def,
      nodes_blacklist=outputs,
      session_config=session_config,
      max_batch_size=max_batch_size,
      max_workspace_size_bytes=max_workspace_size_bytes,
      precision_mode=precision_mode,
      minimum_segment_size=minimum_segment_size,
      is_dynamic_op=is_dynamic_op,
      maximum_cached_engines=maximum_cached_engines,
      use_calibration=False)
  converted_graph_def = trt_converter.convert()
  if output_saved_model_dir:
    trt_converter.save(output_saved_model_dir)
  return converted_graph_def
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/trt_convert.py
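To make the V2 workflow above concrete, here is a minimal sketch of the FP32/FP16 path with pre-built engines (option 2 in the `TrtGraphConverterV2` docstring). It assumes a TensorRT-enabled TensorFlow build running eagerly on a GPU; the model directory `./my_model`, the output directory `./my_model_trt`, and the `[4, 1, 1]` input shape are illustrative placeholders, not values from this file.

```python
import numpy as np
from tensorflow.python.compiler.tensorrt import trt_convert as trt

params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
    precision_mode="FP16",
    # Large enough to cache one engine per input shape we pre-build.
    maximum_cached_engines=16)
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir="./my_model",  # hypothetical SavedModel path
    conversion_params=params)
converter.convert()

def input_fn():
  # One engine is built per unique input shape yielded here.
  yield (np.random.random_sample([4, 1, 1]).astype(np.float32),
         np.random.random_sample([4, 1, 1]).astype(np.float32))

converter.build(input_fn=input_fn)  # Pre-build the engines offline.
converter.save("./my_model_trt")    # Engines are saved as SavedModel assets.
```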
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import gc import os import tempfile import numpy as np from tensorflow.compiler.tf2tensorrt.wrap_py_utils import is_tensorrt_enabled from tensorflow.core.framework import graph_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.compiler.tensorrt import trt_convert from tensorflow.python.eager import def_function from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_spec from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.saved_model import builder from tensorflow.python.saved_model import load from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import loader_impl from tensorflow.python.saved_model import save from tensorflow.python.saved_model import signature_constants from tensorflow.python.saved_model import signature_def_utils from tensorflow.python.saved_model import tag_constants from tensorflow.python.saved_model import utils from tensorflow.python.tools import saved_model_utils from tensorflow.python.training.tracking import tracking from tensorflow.python.util.lazy_loader import LazyLoader _SAVED_MODEL_SIGNATURE_KEY = "mypredict" gen_trt_ops = LazyLoader( "gen_trt_ops", globals(), "tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops") class TrtConvertTest(test_util.TensorFlowTestCase): """Class to test Tensorflow-TensorRT integration python API.""" # Use a small max_workspace_size for tests so they don't consume too much GPU # memory. 
_TRT_MAX_WORKSPACE_SIZE_BYTES = 2 << 20 def mkdtemp(self): return tempfile.mkdtemp(dir=self.get_temp_dir()) def testGetTensorrtRewriterConfig(self): """Test case for TrtGraphConverter.get_tensorrt_rewriter_config().""" if not is_tensorrt_enabled(): return conversion_params = trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace( max_batch_size=128, max_workspace_size_bytes=1234, precision_mode="INT8", minimum_segment_size=10, is_dynamic_op=True, maximum_cached_engines=2) rewriter_cfg = trt_convert.get_tensorrt_rewriter_config( conversion_params=conversion_params) self.assertEqual(["constfold", "layout", "constfold"], rewriter_cfg.optimizers) self.assertEqual(rewriter_config_pb2.RewriterConfig.ONE, rewriter_cfg.meta_optimizer_iterations) trt_optimizer = None for optimizer in rewriter_cfg.custom_optimizers: if optimizer.name == "TensorRTOptimizer": self.assertTrue(trt_optimizer is None) trt_optimizer = optimizer self.assertTrue(trt_optimizer is not None) for key in [ "minimum_segment_size", "max_batch_size", "is_dynamic_op", "max_workspace_size_bytes", "precision_mode", "maximum_cached_engines" ]: self.assertTrue(key in trt_optimizer.parameter_map) self.assertEqual(10, trt_optimizer.parameter_map["minimum_segment_size"].i) self.assertEqual(128, trt_optimizer.parameter_map["max_batch_size"].i) self.assertEqual(True, trt_optimizer.parameter_map["is_dynamic_op"].b) self.assertEqual(1234, trt_optimizer.parameter_map["max_workspace_size_bytes"].i) self.assertEqual( trt_convert._to_bytes("INT8"), trt_optimizer.parameter_map["precision_mode"].s) self.assertEqual(2, trt_optimizer.parameter_map["maximum_cached_engines"].i) def _GetConfigProto(self): """Get ConfigProto for session creation.""" config = config_pb2.ConfigProto( gpu_options=config_pb2.GPUOptions(allow_growth=True)) return config @classmethod def _GetGraph(cls, inp1, inp2, var): """Get the graph for testing.""" # The graph computes: inp1^2 + inp1*var + inp1 + inp2 + var add = inp1 + var mul = inp1 * add add = mul + add add = add + inp2 out = array_ops.identity(add, name="output") return out def _GetModelForV2(self): class SimpleModel(tracking.AutoTrackable): def __init__(self): self.v = None @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32), tensor_spec.TensorSpec(shape=[None, 1, 1], dtype=dtypes.float32) ]) def run(self, inp1, inp2): if self.v is None: self.v = variables.Variable([[[1.0]]], dtype=dtypes.float32) return TrtConvertTest._GetGraph(inp1, inp2, self.v) return SimpleModel() def _GetGraphForV1(self): g = ops.Graph() with g.as_default(): with g.device("/GPU:0"): inp1 = array_ops.placeholder( dtype=dtypes.float32, shape=[None, 1, 1], name="input1") inp2 = array_ops.placeholder( dtype=dtypes.float32, shape=[None, 1, 1], name="input2") var = variables.Variable([[[1.0]]], dtype=dtypes.float32, name="v1") out = TrtConvertTest._GetGraph(inp1, inp2, var) return g, var, inp1, inp2, out def _GetGraphDef(self): """Get the graph def for testing.""" g, var, _, _, _ = self._GetGraphForV1() with self.session(graph=g, config=self._GetConfigProto()) as sess: sess.run(var.initializer) graph_def = graph_util.convert_variables_to_constants( sess, g.as_graph_def(add_shapes=True), ["output"]) node_name_to_op = {node.name: node.op for node in graph_def.node} self.assertEqual( { "v1": "Const", "add/ReadVariableOp": "Identity", "input1": "Placeholder", "input2": "Placeholder", "add": "AddV2", "mul": "Mul", "add_1": "AddV2", "add_2": "AddV2", "output": "Identity" }, node_name_to_op) return graph_def 
def _WriteInputSavedModel(self, input_saved_model_dir): """Write the saved model as an input for testing.""" g, var, inp1, inp2, out = self._GetGraphForV1() signature_def = signature_def_utils.build_signature_def( inputs={ "myinput1": utils.build_tensor_info(inp1), "myinput2": utils.build_tensor_info(inp2) }, outputs={"myoutput": utils.build_tensor_info(out)}, method_name=signature_constants.PREDICT_METHOD_NAME) saved_model_builder = builder.SavedModelBuilder(input_saved_model_dir) with self.session(graph=g, config=self._GetConfigProto()) as sess: sess.run(var.initializer) saved_model_builder.add_meta_graph_and_variables( sess, [tag_constants.SERVING], signature_def_map={_SAVED_MODEL_SIGNATURE_KEY: signature_def}) saved_model_builder.save() def _ConvertGraph(self, input_saved_model_dir=None, output_saved_model_dir=None, need_calibration=False, max_batch_size=1, minimum_segment_size=3, is_dynamic_op=False, maximum_cached_engines=1): """Helper method to convert a GraphDef or SavedModel using TF-TRT.""" converter = trt_convert.TrtGraphConverter( input_saved_model_dir=input_saved_model_dir, input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY, input_graph_def=None if input_saved_model_dir else self._GetGraphDef(), nodes_blacklist=None if input_saved_model_dir else ["output"], session_config=self._GetConfigProto(), max_batch_size=max_batch_size, max_workspace_size_bytes=TrtConvertTest._TRT_MAX_WORKSPACE_SIZE_BYTES, precision_mode=(trt_convert.TrtPrecisionMode.INT8 if need_calibration else trt_convert.TrtPrecisionMode.FP32), minimum_segment_size=minimum_segment_size, is_dynamic_op=is_dynamic_op, maximum_cached_engines=maximum_cached_engines) output_graph_def = converter.convert() if need_calibration: class CalibrationData(object): def __init__(self): self._data = 0 def next(self): self._data += 1 return {"input1:0": [[[self._data]]], "input2:0": [[[self._data]]]} output_graph_def = converter.calibrate( fetch_names=["output:0"], num_runs=10, feed_dict_fn=CalibrationData().next) if output_saved_model_dir is not None: converter.save(output_saved_model_dir=output_saved_model_dir) return output_graph_def def _TestTrtGraphConverter(self, input_saved_model_dir=None, output_saved_model_dir=None, need_calibration=False, is_dynamic_op=False): """General method to test trt_convert.TrtGraphConverter().""" output_graph_def = self._ConvertGraph( input_saved_model_dir=input_saved_model_dir, output_saved_model_dir=output_saved_model_dir, need_calibration=need_calibration, is_dynamic_op=is_dynamic_op) graph_defs_to_verify = [output_graph_def] if output_saved_model_dir: saved_model_graph_def = saved_model_utils.get_meta_graph_def( output_saved_model_dir, tag_constants.SERVING).graph_def self.assertIsInstance(saved_model_graph_def, graph_pb2.GraphDef) graph_defs_to_verify.append(saved_model_graph_def) for graph_def in graph_defs_to_verify: node_name_to_op = {node.name: node.op for node in graph_def.node} self.assertEqual( { "input1": "Placeholder", "input2": "Placeholder", "TRTEngineOp_0": "TRTEngineOp", "output": "Identity" }, node_name_to_op) if need_calibration: trt_engine_nodes = [ node for node in graph_def.node if node.op == "TRTEngineOp" ] self.assertNotEmpty(trt_engine_nodes) for node in trt_engine_nodes: self.assertTrue(len(node.attr["calibration_data"].s)) # Run the calibrated graph. # TODO(laigd): consider having some input where the answer is different. 
with ops.Graph().as_default(): importer.import_graph_def(graph_def, name="") with self.session(config=self._GetConfigProto()) as sess: for test_data in range(10): self.assertEqual((test_data + 1.0)**2 + test_data, sess.run( "output:0", feed_dict={ "input1:0": [[[test_data]]], "input2:0": [[[test_data]]] })) @test_util.deprecated_graph_mode_only def testTrtGraphConverter_BasicConversion(self): """Test case for trt_convert.TrtGraphConverter().""" if not is_tensorrt_enabled(): return input_saved_model_dir = self.mkdtemp() self._WriteInputSavedModel(input_saved_model_dir) for need_calibration in [False, True]: # Use GraphDef as input. self._TestTrtGraphConverter() # Use SavedModel as input. self._TestTrtGraphConverter( input_saved_model_dir=input_saved_model_dir, output_saved_model_dir=self.mkdtemp(), need_calibration=need_calibration) def _CreateConverterV2( self, input_saved_model_dir, input_saved_model_signature_key=_SAVED_MODEL_SIGNATURE_KEY, precision_mode=trt_convert.TrtPrecisionMode.FP32, is_dynamic_op=True, maximum_cached_engines=2): return trt_convert.TrtGraphConverterV2( input_saved_model_dir=input_saved_model_dir, input_saved_model_signature_key=input_saved_model_signature_key, conversion_params=trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace( max_workspace_size_bytes=10 << 20, # Use a smaller workspace. precision_mode=precision_mode, is_dynamic_op=is_dynamic_op, maximum_cached_engines=maximum_cached_engines)) def _CheckTrtOps(self, concrete_func, check_fn=None): graph_def = concrete_func.graph.as_graph_def() trt_op_names = [] for node in graph_def.node: if node.op == "TRTEngineOp": trt_op_names.append(node.name) if check_fn: check_fn(node) for func in graph_def.library.function: for node in func.node_def: if node.op == "TRTEngineOp": trt_op_names.append(node.name) if check_fn: check_fn(node) self.assertEqual(1, len(trt_op_names)) self.assertIn("TRTEngineOp_0", trt_op_names[0]) def _RandomInput(self, shape, dtype=np.float32): inp1 = np.random.random_sample(shape).astype(dtype) inp2 = np.random.random_sample(shape).astype(dtype) return inp1, inp2 @test_util.run_v2_only def testTrtGraphConverter_DynamicConversion_v2(self): """Test case for trt_convert.TrtGraphConverter().""" if not is_tensorrt_enabled(): return np_input1, np_input2 = self._RandomInput([4, 1, 1]) # Create a model and save it. input_saved_model_dir = self.mkdtemp() root = self._GetModelForV2() expected_output = root.run(np_input1, np_input2) save.save(root, input_saved_model_dir, {_SAVED_MODEL_SIGNATURE_KEY: root.run}) # Run TRT conversion. converter = self._CreateConverterV2(input_saved_model_dir) converter.convert() # Verify the converted GraphDef and ConcreteFunction. self._CheckTrtOps(converter._converted_func) # pylint: disable=protected-access # Save the converted model without any TRT engine cache. output_saved_model_dir = self.mkdtemp() converter.save(output_saved_model_dir) unexpected_asset_file = os.path.join( output_saved_model_dir, "assets/trt-serialized-engine.TRTEngineOp_0") self.assertFalse(os.path.exists(unexpected_asset_file)) # Run the converted function to populate the engine cache. def _InputFn(): yield np_input1, np_input2 converter.build(input_fn=_InputFn) # Save the converted model again with serialized engine cache. 
output_saved_model_dir = self.mkdtemp() converter.save(output_saved_model_dir) expected_asset_file = os.path.join( output_saved_model_dir, "assets/trt-serialized-engine.TRTEngineOp_0") self.assertTrue(os.path.exists(expected_asset_file)) self.assertTrue(os.path.getsize(expected_asset_file)) del converter gc.collect() # Force GC to destroy the TRT engine cache. # Load and verify the converted model. # # TODO(laigd): the name of the new input_signature of the # `root_with_trt.run` function is empty string (originaly was None), # investigate why. root_with_trt = load.load(output_saved_model_dir) # TODO(laigd): `root_with_trt.run` is still using the original graph without # trt. Consider changing that. # self._CheckTrtOps(root_with_trt.run.get_concrete_function()) converted_signature = root_with_trt.signatures[_SAVED_MODEL_SIGNATURE_KEY] self._CheckTrtOps(converted_signature) output_with_trt = converted_signature( inp1=ops.convert_to_tensor(np_input1), inp2=ops.convert_to_tensor(np_input2)) # The output of running the converted signature is a dict due to # compatibility reasons with V1 SavedModel signature mechanism. self.assertAllClose( expected_output, list(output_with_trt.values())[0], atol=1e-6, rtol=1e-6) del root_with_trt gc.collect() # Force GC to destroy the TRT engine cache. @test_util.run_v2_only def testTrtGraphConverter_StaticConversionNotSupportedInV2(self): """Test case for trt_convert.TrtGraphConverter() using static mode.""" if not is_tensorrt_enabled(): return # Create a model and save it. input_saved_model_dir = self.mkdtemp() root = self._GetModelForV2() save.save(root, input_saved_model_dir, {_SAVED_MODEL_SIGNATURE_KEY: root.run}) # Run TRT conversion. converter = self._CreateConverterV2( input_saved_model_dir, is_dynamic_op=False) with self.assertRaisesRegexp( ValueError, r"Option is_dynamic_op=False is not supported in TF 2.0"): converter.convert() @test_util.run_v2_only def testTrtGraphConverter_Int8Conversion_v2(self): if not is_tensorrt_enabled(): return np_input1, np_input2 = self._RandomInput([4, 1, 1]) # Create a model and save it. input_saved_model_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) root = self._GetModelForV2() expected_output = root.run(np_input1, np_input2) save.save(root, input_saved_model_dir, {_SAVED_MODEL_SIGNATURE_KEY: root.run}) # Run TRT conversion. converter = self._CreateConverterV2( input_saved_model_dir, precision_mode=trt_convert.TrtPrecisionMode.INT8, maximum_cached_engines=3) # Convert and perform INT8 calibration def _CalibrationInputFn(): yield np_input1, np_input2 converter.convert(calibration_input_fn=_CalibrationInputFn) def _CheckFn(node): self.assertTrue(len(node.attr["calibration_data"].s), node.name) # Verify the converted GraphDef. self._CheckTrtOps(converter._converted_func, _CheckFn) # pylint: disable=protected-access # Build another engine with different batch size. def _InputFn(): yield self._RandomInput([5, 1, 1]) converter.build(input_fn=_InputFn) # Save the converted model. # TODO(laigd): check that it should contain two engines. output_saved_model_dir = self.mkdtemp() converter.save(output_saved_model_dir) expected_asset_file = os.path.join( output_saved_model_dir, "assets/trt-serialized-engine.TRTEngineOp_0") self.assertTrue(os.path.exists(expected_asset_file)) self.assertTrue(os.path.getsize(expected_asset_file)) del converter gc.collect() # Force GC to destroy the TRT engine cache. # Load and verify the converted model. 
root_with_trt = load.load(output_saved_model_dir) converted_signature = root_with_trt.signatures[_SAVED_MODEL_SIGNATURE_KEY] self._CheckTrtOps(converted_signature, _CheckFn) output_with_trt = converted_signature( inp1=ops.convert_to_tensor(np_input1), inp2=ops.convert_to_tensor(np_input2)) self.assertEqual(1, len(output_with_trt)) # The output of running the converted signature is a dict due to # compatibility reasons with V1 SavedModel signature mechanism. self.assertAllClose( expected_output, list(output_with_trt.values())[0], atol=1e-6, rtol=1e-6) # Run with an input of different batch size. It should build a new engine # using calibration table. # TODO(laigd): check that it should contain three engines. np_input1, np_input2 = self._RandomInput([6, 1, 1]) converted_signature( inp1=ops.convert_to_tensor(np_input1), inp2=ops.convert_to_tensor(np_input2)) del root_with_trt gc.collect() # Force GC to destroy the TRT engine cache. @test_util.run_v2_only def testTrtGraphConverter_DestroyEngineCache(self): """Test case for trt_convert.TrtGraphConverter().""" if not is_tensorrt_enabled(): return np_input1, np_input2 = self._RandomInput([4, 1, 1]) # Create a model and save it. input_saved_model_dir = self.mkdtemp() root = self._GetModelForV2() save.save(root, input_saved_model_dir, {_SAVED_MODEL_SIGNATURE_KEY: root.run}) # Run TRT conversion. converter = self._CreateConverterV2(input_saved_model_dir) converter.convert() def _InputFn(): yield np_input1, np_input2 converter.build(input_fn=_InputFn) # Populate the TRT engine cache. output_saved_model_dir = self.mkdtemp() converter.save(output_saved_model_dir) def _DestroyCache(): with ops.device("GPU:0"): handle = gen_trt_ops.create_trt_resource_handle( resource_name="TRTEngineOp_0") gen_resource_variable_ops.destroy_resource_op( handle, ignore_lookup_error=False) with self.assertRaisesRegexp(errors.NotFoundError, r"Resource .* does not exist."): _DestroyCache() # Load the converted model and make sure the engine cache is populated by # default. root = load.load(output_saved_model_dir) _DestroyCache() with self.assertRaisesRegexp(errors.NotFoundError, r"Resource .* does not exist."): _DestroyCache() # Load the converted model again and make sure the engine cache is destroyed # when the model goes out of scope. root = load.load(output_saved_model_dir) del root gc.collect() # Force GC to destroy the TRT engine cache. 
with self.assertRaisesRegexp(errors.NotFoundError, r"Resource .* does not exist."): _DestroyCache() def _CompareSavedModel(self, model_class): signature_key = "serving_default" def _GetModelPaths(model_class): input_saved_model_dir = self.mkdtemp() root = model_class() save.save(root, input_saved_model_dir) converter = self._CreateConverterV2( input_saved_model_dir, input_saved_model_signature_key=signature_key) converter.convert() output_saved_model_dir = self.mkdtemp() converter.save(output_saved_model_dir) return input_saved_model_dir, output_saved_model_dir def _GetSignatureDef(export_dir): saved_model_proto = loader_impl.parse_saved_model(export_dir) self.assertEqual(1, len(saved_model_proto.meta_graphs)) meta_graph = saved_model_proto.meta_graphs[0] self.assertIn(signature_key, meta_graph.signature_def) return meta_graph.signature_def[signature_key] def _CompareSignatureDef(original_def, converted_def, is_input): endpoints = original_def.inputs if is_input else original_def.outputs converted_endpoints = ( converted_def.inputs if is_input else converted_def.outputs) self.assertEqual(set(endpoints.keys()), set(converted_endpoints.keys())) for key in endpoints: original_input = endpoints[key] converted_input = converted_endpoints[key] self.assertEqual(original_input.name, converted_input.name) self.assertEqual(original_input.dtype, converted_input.dtype) self.assertEqual( tensor_shape.TensorShape(original_input.tensor_shape).as_list(), tensor_shape.TensorShape(converted_input.tensor_shape).as_list()) def _GetStructuredOutputs(export_dir): root = load.load(export_dir) return root.signatures[signature_key].structured_outputs saved_model_path, converted_saved_model_path = _GetModelPaths(model_class) original_def = _GetSignatureDef(saved_model_path) converted_def = _GetSignatureDef(converted_saved_model_path) self.assertEqual(original_def.method_name, converted_def.method_name) _CompareSignatureDef(original_def, converted_def, True) _CompareSignatureDef(original_def, converted_def, False) self.assertEqual( _GetStructuredOutputs(saved_model_path), _GetStructuredOutputs(converted_saved_model_path)) @test_util.run_v2_only def testRetainSignatureInfo_NoInputs(self): class _Model(tracking.AutoTrackable): @def_function.function(input_signature=[]) def run(self): return array_ops.constant(1.0) self._CompareSavedModel(_Model) @test_util.run_v2_only def testRetainSignatureInfo_OneInput(self): class _Model(tracking.AutoTrackable): @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32) ]) def run(self, inp): return inp + inp * inp self._CompareSavedModel(_Model) @test_util.run_v2_only def testRetainSignatureInfo_TwoInputs(self): class _Model(tracking.AutoTrackable): @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32), tensor_spec.TensorSpec(shape=[None, 2], dtype=dtypes.float32) ]) def run(self, inp1, inp2): return inp1 + inp2 * inp2 self._CompareSavedModel(_Model) @test_util.run_v2_only def testRetainSignatureInfo_OneOutputSignatureKey(self): class _Model(tracking.AutoTrackable): @def_function.function(input_signature=[]) def run(self): return {"my_output": array_ops.constant(1.0)} self._CompareSavedModel(_Model) @test_util.run_v2_only def testRetainSignatureInfo_TwoOutputSignatureKeys(self): class _Model(tracking.AutoTrackable): @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[None, 1], dtype=dtypes.float32) ]) def run(self, inp): # Here the keys are not ordered lexicographically 
on purpose. return { "output_b": array_ops.constant(1.0), "output_a": inp + inp * inp } self._CompareSavedModel(_Model) def _TestRun(self, sess, batch_size, expect_engine_is_run=True): result = sess.run( "output:0", feed_dict={ "input1:0": [[[1.0]]] * batch_size, "input2:0": [[[1.0]]] * batch_size }) self.assertAllEqual([[[5.0]]] * batch_size, result) @test_util.deprecated_graph_mode_only def testTrtGraphConverter_MinimumSegmentSize(self): if not is_tensorrt_enabled(): return output_graph_def = self._ConvertGraph(minimum_segment_size=7) node_name_to_op = {node.name: node.op for node in output_graph_def.node} self.assertEqual( { "add/ReadVariableOp": "Const", "input1": "Placeholder", "input2": "Placeholder", "add": "AddV2", "mul": "Mul", "add_1": "AddV2", "add_2": "AddV2", "output": "Identity" }, node_name_to_op) @test_util.deprecated_graph_mode_only def testTrtGraphConverter_DynamicOp(self): if not is_tensorrt_enabled(): return input_saved_model_dir = self.mkdtemp() output_saved_model_dir = self.mkdtemp() self._WriteInputSavedModel(input_saved_model_dir) output_graph_def = self._ConvertGraph( input_saved_model_dir=input_saved_model_dir, output_saved_model_dir=output_saved_model_dir, is_dynamic_op=True, maximum_cached_engines=2) # Test the output GraphDef. with ops.Graph().as_default(): importer.import_graph_def(output_graph_def, name="") with self.session(config=self._GetConfigProto()) as sess: # Run with batch size 1, a new engine is created and cached. self._TestRun(sess, 1) # Run with batch size 2, a new engine is created and cached. self._TestRun(sess, 2) # Run with batch size 3, since the number of cached engines has reached # the max, it should evict an old engine and create a new one. self._TestRun(sess, 3) # Test the output SavedModel with ops.Graph().as_default(): with self.session(config=self._GetConfigProto()) as sess: loader.load(sess, [tag_constants.SERVING], output_saved_model_dir) # Run with batch size 1, a new engine is created and cached. self._TestRun(sess, 1) # Run with batch size 2, a new engine is created and cached. self._TestRun(sess, 2) # Run with batch size 3, since the number of cached engines has reached # the max, it should evict an old engine and create a new one. self._TestRun(sess, 3) def _TestStaticOp(self): if not is_tensorrt_enabled(): return input_saved_model_dir = self.mkdtemp() output_saved_model_dir = self.mkdtemp() self._WriteInputSavedModel(input_saved_model_dir) output_graph_def = self._ConvertGraph( input_saved_model_dir=input_saved_model_dir, output_saved_model_dir=output_saved_model_dir, maximum_cached_engines=2) # Test the output GraphDef. with ops.Graph().as_default(): importer.import_graph_def(output_graph_def, name="") with self.session(config=self._GetConfigProto()) as sess: # Run with batch size 1, the default engine embedded in the graphdef # will be used. self._TestRun(sess, 1, expect_engine_is_run=True) # Run with batch size 2, which exceed the max_batch_size, it should try # to fall back to TF function. self._TestRun(sess, 2, expect_engine_is_run=False) # Test the output SavedModel with ops.Graph().as_default(): with self.session(config=self._GetConfigProto()) as sess: loader.load(sess, [tag_constants.SERVING], output_saved_model_dir) # Run with batch size 1, the default engine embedded in the graphdef # will be used. self._TestRun(sess, 1, expect_engine_is_run=True) # Run with batch size 2, which exceed the max_batch_size, it should try # to fall back to TF function. 
self._TestRun(sess, 2, expect_engine_is_run=False) @test_util.deprecated_graph_mode_only def testTrtGraphConverter_StaticOp(self): self._TestStaticOp() if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/trt_convert_test.py
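The INT8 test above (`testTrtGraphConverter_Int8Conversion_v2`) exercises the calibration workflow from the converter docstring; the following is a minimal standalone sketch of that flow, again assuming a TensorRT-enabled build and using a hypothetical `./my_model` SavedModel and illustrative shapes.

```python
import numpy as np
from tensorflow.python.compiler.tensorrt import trt_convert as trt

params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
    precision_mode=trt.TrtPrecisionMode.INT8,
    maximum_cached_engines=1,  # Only one INT8 engine is supported in this mode.
    use_calibration=True)
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir="./my_model",  # hypothetical SavedModel path
    conversion_params=params)

def calibration_input_fn():
  # Every calibration batch must share a single shape.
  for _ in range(10):
    yield (np.ones([4, 1, 1], np.float32), np.ones([4, 1, 1], np.float32))

converter.convert(calibration_input_fn=calibration_input_fn)
converter.save("./my_model_trt_int8")
```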
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Exposes the python wrapper for TensorRT graph transforms.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=unused-import,line-too-long from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: enable=unused-import,line-too-long
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/__init__.py
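Since the package `__init__` above only re-exports `trt_convert` under the shorter name `trt`, the two import forms below refer to the same module object:

```python
from tensorflow.python.compiler.tensorrt import trt
from tensorflow.python.compiler.tensorrt import trt_convert

assert trt is trt_convert  # `trt` is just the alias created by the re-export.
params = trt.DEFAULT_TRT_CONVERSION_PARAMS  # same object either way
```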
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic tests for TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.platform import test class SimpleSingleEngineTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing single segment.""" dtype = inp.dtype conv_filter = constant_op.constant( [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]], name="weights", dtype=dtype) conv = nn.conv2d( input=inp, filter=conv_filter, strides=[1, 2, 2, 1], padding="SAME", name="conv") bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtype) added = nn.bias_add(conv, bias, name="bias_add") relu = nn.relu(added, "relu") identity = array_ops.identity(relu, "identity") pool = nn_ops.max_pool( identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool") return array_ops.squeeze(pool, name="output_0") def GetParams(self): # TODO(aaroey): test graph with different dtypes. return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]], [[100, 6, 6, 6]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": [ "weights", "conv", "bias", "bias_add", "relu", "identity", "max_pool" ] } class SimpleMultiEnginesTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing multiple segment.""" dtype = inp.dtype conv_filter = constant_op.constant( [[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]], name="weights", dtype=dtype) conv = nn.conv2d( input=inp, filter=conv_filter, strides=[1, 2, 2, 1], padding="SAME", name="conv") c1 = constant_op.constant( np.random.randn(12, 12, 6), dtype=dtype, name="c1") p = math_ops.mul(conv, c1, name="mul") c2 = constant_op.constant( np.random.randn(12, 12, 6), dtype=dtype, name="c2") q = math_ops.div(conv, c2, name="div") edge = self.trt_incompatible_op(q, name="incompatible") edge = math_ops.div(edge, edge, name="div1") r = math_ops.add(edge, edge, name="add") p = math_ops.sub(p, edge, name="sub") q = math_ops.mul(q, edge, name="mul1") s = math_ops.add(p, q, name="add1") s = math_ops.sub(s, r, name="sub1") return array_ops.squeeze(s, name="output_0") def GetParams(self): # TODO(aaroey): test graph with different dtypes. 
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]], [[100, 12, 12, 6]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": [ "add", "add1", "c1", "div1", "mul", "mul1", "sub", "sub1" ], "TRTEngineOp_1": ["c2", "conv", "div", "weights"] } def GetConversionParams(self, run_params): """Return a ConversionParams for test.""" return super( SimpleMultiEnginesTest, self ).GetConversionParams(run_params)._replace( # Disable layout optimizer, since it'll add Transpose(Const, Const) to # the graph and breaks the conversion check. rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig()) class SimpleMultiEnginesTest2(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing two segment.""" n = inp for i in range(2): c = constant_op.constant(1.0, name="c%d" % i) n = math_ops.add(n, c, name="add%d" % i) n = math_ops.mul(n, n, name="mul%d" % i) edge = self.trt_incompatible_op(n, name="incompatible") with ops.control_dependencies([edge]): c = constant_op.constant(1.0, name="c2") n = math_ops.add(n, c, name="add2") n = math_ops.mul(n, n, name="mul2") c = constant_op.constant(1.0, name="c3") n = math_ops.add(n, c, name="add3") n = math_ops.mul(n, n, name="mul3") return array_ops.squeeze(n, name="output_0") def GetParams(self): shapes = [[2, 32, 32, 3]] return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes, output_shapes=shapes) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": ["c0", "c1", "add0", "add1", "mul0", "mul1"], "TRTEngineOp_1": ["c2", "c3", "add2", "add3", "mul2", "mul3"] } def ShouldRunTest(self, run_params): """Whether to run the test.""" # Disable the test in fp16 mode since multiple matmul and add ops together # can cause overflow. return ((run_params.precision_mode != "FP16") and not (trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.use_calibration)) class ConstInputTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing multiple segment.""" n = inp c = constant_op.constant(1.0, name="c") # Adds control dependency from the constant op to a trt incompatible op, # and adds control dependency from the trt incompatible op to all other # ops, to make sure the constant op cannot be contracted with any trt # segment that depends on it. 
with ops.control_dependencies([c]): d = self.trt_incompatible_op(n, name="incompatible") with ops.control_dependencies([d]): n = math_ops.add(n, c, name="add") n = math_ops.mul(n, n, name="mul") n = math_ops.add(n, n, name="add1") n = self.trt_incompatible_op(n, name="incompatible1") with ops.control_dependencies([d]): n = math_ops.add(n, c, name="add2") n = math_ops.mul(n, n, name="mul1") n = math_ops.add(n, n, name="add3") return array_ops.squeeze(n, name="output_0") def GetParams(self): shapes = [[2, 32, 32, 3]] return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes, output_shapes=shapes) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": ["add", "add1", "mul"], "TRTEngineOp_1": ["add2", "add3", "mul1"] } class ConstDataInputSingleEngineTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing single segment.""" n = inp c = constant_op.constant(1.0, name="c") n = math_ops.add(n, c, name="add") n = math_ops.mul(n, n, name="mul") n = math_ops.add(n, n, name="add1") return array_ops.squeeze(n, name="output_0") def GetParams(self): shapes = [[2, 32, 32, 3]] return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes, output_shapes=shapes) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return {"TRTEngineOp_0": ["c", "add", "add1", "mul"]} class ConstDataInputMultipleEnginesTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing multiple segment.""" n = inp c = constant_op.constant(1.0, name="c") n = math_ops.add(n, c, name="add") n = math_ops.mul(n, n, name="mul") n = math_ops.add(n, n, name="add1") n = self.trt_incompatible_op(n, name="incompatible1") n = math_ops.add(n, c, name="add2") n = math_ops.mul(n, n, name="mul1") n = math_ops.add(n, n, name="add3") return array_ops.squeeze(n, name="output_0") def GetParams(self): shapes = [[2, 32, 32, 3]] return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes, output_shapes=shapes) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": ["add2", "add3", "mul1"], # Why segment ["add", "add1", "mul"] was assigned segment id 1 # instead of 0: the parent node of this segment is actually const # node 'c', but it's removed later since it's const output of the # segment which is not allowed. 
"TRTEngineOp_1": ["add", "add1", "mul"] } class ControlDependencyTest(trt_test.TfTrtIntegrationTestBase): def GraphFn(self, inp): """Create a graph containing multiple segment.""" c1 = constant_op.constant(1.0, name="c1") c2 = constant_op.constant(1.0, name="c2") d1 = constant_op.constant(1.0, name="d1") d2 = self.trt_incompatible_op(inp, name="d2") with ops.control_dependencies([d1, d2]): add = math_ops.add(inp, c1, name="add") with ops.control_dependencies([d1, d2]): mul = math_ops.mul(add, add, name="mul") with ops.control_dependencies([d1, d2]): add1 = math_ops.add(mul, mul, name="add1") edge = self.trt_incompatible_op(add1, name="incompatible") with ops.control_dependencies([d1, d2, add, mul]): add2 = math_ops.add(edge, c2, name="add2") with ops.control_dependencies([d1, d2, add1, mul]): mul1 = math_ops.mul(add2, add2, name="mul1") with ops.control_dependencies([d1, d2, add, add1]): add3 = math_ops.add(mul1, mul1, name="add3") return array_ops.squeeze(add3, name="output_0") def GetParams(self): shapes = [[2, 32, 32, 3]] return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes, output_shapes=shapes) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return { "TRTEngineOp_0": ["c1", "add", "add1", "mul"], "TRTEngineOp_1": ["c2", "add2", "add3", "mul1"] } if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/test/base_test.py
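The `ExpectedEnginesToBuild()` checks above amount to collecting `TRTEngineOp` nodes from the converted graph, including the function library, where V2 conversion can place them. A small helper in that spirit; `graph_def` is assumed to be the GraphDef produced by a TF-TRT conversion:

```python
def trt_engine_names(graph_def):
  """Return the names of all TRTEngineOp nodes in a converted GraphDef."""
  names = [node.name for node in graph_def.node
           if node.op == "TRTEngineOp"]
  # V2 conversion can also place engine ops inside library functions.
  for func in graph_def.library.function:
    names.extend(node.name for node in func.node_def
                 if node.op == "TRTEngineOp")
  return names
```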
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model script to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tf2tensorrt.wrap_py_utils import get_linked_tensorrt_version from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test def _GraphFn(x, add_quantization_nodes): def _Quantize(x, r): if add_quantization_nodes: x = gen_array_ops.fake_quant_with_min_max_vars(x, -r, r) return x x = _Quantize(x, 10.0) x = x + 5 x = _Quantize(x, 15.0) x = x - 5 x = _Quantize(x, 10.0) x = x * 0.1 x = _Quantize(x, 1.0) w = constant_op.constant(np.ones((8, 1)), dtype=dtypes.float32) x = math_ops.matmul(x, w) x = _Quantize(x, 10.0) return array_ops.identity(x, name="output_0") def _GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[8, 8]], [[8, 1]]) class QuantizationMissingAllRangesTest(trt_test.TfTrtIntegrationTestBase): """Create a graph containing single segment with no quantization ranges.""" def GraphFn(self, x): return _GraphFn(x, add_quantization_nodes=False) def GetParams(self): return _GetParams(self) def ShouldRunTest(self, run_params): if get_linked_tensorrt_version()[0] < 5: return False # Only test static engine mode, with or without calibration. return (trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.convert_online and not run_params.dynamic_engine) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" # In static engine mode with calibration, it should build a calibration # engine. # In static engine mode without calibration, the engine building will # succeed but fall back to non-quantized ops. return ["TRTEngineOp_0"] class QuantizationWithRangesTest(trt_test.TfTrtIntegrationTestBase): """Create a graph containing single segment with no quantization ranges.""" def GraphFn(self, x): return _GraphFn(x, add_quantization_nodes=True) def GetParams(self): return _GetParams(self) def ShouldRunTest(self, run_params): if get_linked_tensorrt_version()[0] < 5: return False # Test static/dynamic engine with/without calibration. 
return (trt_test.IsQuantizationMode(run_params.precision_mode) and not run_params.convert_online) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_0"] def ExpectedAbsoluteTolerance(self, run_params): """The absolute tolerance to compare floating point results.""" return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01 def ExpectedRelativeTolerance(self, run_params): """The relative tolerance to compare floating point results.""" return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01 class NonQuantizedPrecisionsWithRangesTest(trt_test.TfTrtIntegrationTestBase): """Create a graph containing single segment with no quantization ranges.""" def GraphFn(self, x): return _GraphFn(x, add_quantization_nodes=True) def GetParams(self): return _GetParams(self) def ShouldRunTest(self, run_params): # Only test FP32/FP16 mode. return not trt_test.IsQuantizationMode(run_params.precision_mode) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" # The fake quant ops are not supported in FP32/FP16 mode, and will split the # graph into three TRT segments. return ["TRTEngineOp_0", "TRTEngineOp_1", "TRTEngineOp_2", "TRTEngineOp_3"] def ExpectedAbsoluteTolerance(self, run_params): """The absolute tolerance to compare floating point results.""" return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01 def ExpectedRelativeTolerance(self, run_params): """The relative tolerance to compare floating point results.""" return 1.e-05 if run_params.precision_mode == "FP32" else 1.e-01 if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/test/quantization_test.py
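The tests above hinge on whether `_GraphFn` inserts fake-quant ops: with them, TF-TRT can read INT8 ranges directly from the graph instead of calibrating. A minimal sketch of that pattern in graph mode, with an illustrative shape and range:

```python
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops

g = ops.Graph()
with g.as_default():
  x = array_ops.placeholder(dtypes.float32, shape=[8, 8], name="input")
  # Advertise that values of x lie in [-10, 10]; TF-TRT can use this range
  # in place of calibration data.
  x = gen_array_ops.fake_quant_with_min_max_vars(x, min=-10.0, max=10.0)
  out = array_ops.identity(x + 5.0, name="output_0")
```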
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model script to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.platform import test class ConcatenationTest(trt_test.TfTrtIntegrationTestBase): """Testing Concatenation in TF-TRT conversion.""" def GraphFn(self, x): dtype = x.dtype # scale a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r1 = x / a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r2 = a / x a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype) r3 = a + x a = constant_op.constant(np.random.randn(1, 3, 1), dtype=dtype) r4 = x * a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r5 = x - a a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r6 = a - x a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r7 = x - a a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r8 = a - x a = constant_op.constant(np.random.randn(3, 1, 1), dtype=dtype) r9 = gen_math_ops.maximum(x, a) a = constant_op.constant(np.random.randn(3, 1), dtype=dtype) r10 = gen_math_ops.minimum(a, x) a = constant_op.constant(np.random.randn(3), dtype=dtype) r11 = x * a a = constant_op.constant(np.random.randn(1), dtype=dtype) r12 = a * x concat1 = array_ops.concat([r1, r2, r3, r4, r5, r6], axis=-1) concat2 = array_ops.concat([r7, r8, r9, r10, r11, r12], axis=3) x = array_ops.concat([concat1, concat2], axis=-1) return gen_array_ops.reshape(x, [2, -1], name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]], [[2, 126]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_0"] if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/test/concatenation_test.py
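The `[[2, 126]]` expected shape in `GetParams()` follows from broadcasting: most `r*` results keep the `[2, 3, 3, 1]` input shape, but `r11` multiplies by a shape-`(3,)` constant and broadcasts to `[2, 3, 3, 3]`, so `concat2` is `[2, 3, 3, 8]`, the final concat is `[2, 3, 3, 14]`, and the reshape gives `[2, 126]`. A NumPy check of the two nonobvious pieces:

```python
import numpy as np

x = np.random.randn(2, 3, 3, 1).astype(np.float32)
r5 = x - np.random.randn(3, 1, 1).astype(np.float32)  # -> (2, 3, 3, 1)
r11 = x * np.random.randn(3).astype(np.float32)       # -> (2, 3, 3, 3)
assert r5.shape == (2, 3, 3, 1) and r11.shape == (2, 3, 3, 3)
# For rank-4 tensors, axis=-1 and axis=3 address the same dimension.
assert np.concatenate([r5, r11], axis=-1).shape == (2, 3, 3, 4)
assert 2 * 3 * 3 * 14 == 2 * 126  # hence the [[2, 126]] expected output
```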
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model script to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_impl from tensorflow.python.ops import nn_ops from tensorflow.python.platform import test class VGGBlockTest(trt_test.TfTrtIntegrationTestBase): """Single vgg layer test in TF-TRT conversion.""" def GraphFn(self, x): dtype = x.dtype x, _, _ = nn_impl.fused_batch_norm( x, [1.0, 1.0], [0.0, 0.0], mean=[0.5, 0.5], variance=[1.0, 1.0], is_training=False) e = constant_op.constant( np.random.randn(1, 1, 2, 6), name="weights", dtype=dtype) conv = nn.conv2d( input=x, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv") b = constant_op.constant(np.random.randn(6), name="bias", dtype=dtype) t = nn.bias_add(conv, b, name="biasAdd") relu = nn.relu(t, "relu") idty = array_ops.identity(relu, "ID") v = nn_ops.max_pool( idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool") return array_ops.squeeze(v, name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[5, 8, 8, 2]], [[5, 2, 2, 6]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_0"] if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/test/vgg_block_test.py
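The `[[5, 2, 2, 6]]` expected shape in `GetParams()` is plain shape arithmetic: the stride-2 SAME conv takes the 8x8 input to 4x4, the 2x2 VALID max pool with stride 2 halves it again to 2x2, and the 1x1x2x6 filter gives 6 output channels. A quick check of the two formulas involved:

```python
import math

def same_conv_out(size, stride):
  # SAME padding: output = ceil(input / stride).
  return math.ceil(size / stride)

def valid_pool_out(size, window, stride):
  # VALID padding: output = floor((input - window) / stride) + 1.
  return (size - window) // stride + 1

h = same_conv_out(8, 2)      # 8x8 conv, stride 2, SAME -> 4
h = valid_pool_out(h, 2, 2)  # 2x2 max pool, stride 2   -> 2
assert h == 2                # matches the expected [5, 2, 2, 6]
```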
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model script to test TF-TensorRT integration.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import gen_array_ops from tensorflow.python.ops import gen_math_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class UnaryTest(trt_test.TfTrtIntegrationTestBase): """Test for unary operations in TF-TRT.""" def GraphFn(self, x1, x2): x = x1 q = math_ops.abs(x) q = q + 1.0 q = gen_math_ops.exp(q) q = gen_math_ops.log(q) q = array_ops.squeeze(q, axis=-2) q = math_ops.abs(q) q = q + 2.2 q = gen_math_ops.sqrt(q) q = gen_math_ops.rsqrt(q) q = math_ops.negative(q) q = array_ops.squeeze(q, axis=3) q = math_ops.abs(q) q = q + 3.0 a = gen_math_ops.reciprocal(q) x = constant_op.constant(np.random.randn(5, 8, 12), dtype=x.dtype) q = math_ops.abs(x) q = q + 2.0 q = gen_math_ops.exp(q) q = gen_math_ops.log(q) q = math_ops.abs(q) q = q + 2.1 q = gen_math_ops.sqrt(q) q = gen_math_ops.rsqrt(q) q = math_ops.negative(q) q = math_ops.abs(q) q = q + 4.0 b = gen_math_ops.reciprocal(q) # TODO(jie): this one will break, broadcasting on batch. x = x2 q = math_ops.abs(x) q = q + 5.0 q = gen_math_ops.exp(q) q = array_ops.squeeze(q, axis=[-1, -2, 3]) q = gen_math_ops.log(q) q = math_ops.abs(q) q = q + 5.1 q = gen_array_ops.reshape(q, [12, 5, 1, 1, 8, 1, 12]) q = array_ops.squeeze(q, axis=[5, 2, 3]) q = gen_math_ops.sqrt(q) q = math_ops.abs(q) q = q + 5.2 q = gen_math_ops.rsqrt(q) q = math_ops.negative(q) q = math_ops.abs(q) q = q + 5.3 c = gen_math_ops.reciprocal(q) q = a * b q = q / c return array_ops.squeeze(q, name="output_0") def GetParams(self): return self.BuildParams(self.GraphFn, dtypes.float32, [[12, 5, 8, 1, 1, 12], [12, 5, 8, 1, 12, 1, 1]], [[12, 5, 8, 12]]) def ExpectedEnginesToBuild(self, run_params): """Return the expected engines to build.""" return ["TRTEngineOp_0"] if __name__ == "__main__": test.main()
tensorflow-r1.15.5-nv23.03
tensorflow/python/compiler/tensorrt/test/unary_test.py
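Many of the unary chains above are deliberate near-identities (for example `log(exp(q)) == q` and `rsqrt(sqrt(q)) == q ** -0.25`), which keeps the expected outputs numerically tame while still exercising each TRT unary converter. A NumPy check of those two compositions:

```python
import numpy as np

q = np.abs(np.random.randn(4).astype(np.float32)) + 1.0
np.testing.assert_allclose(np.log(np.exp(q)), q, rtol=1e-5)
# rsqrt(sqrt(q)) == q ** -0.25
np.testing.assert_allclose(1.0 / np.sqrt(np.sqrt(q)), q ** -0.25, rtol=1e-5)
```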