python_code (0 to 1.02M chars) | repo_name (9 to 48 chars) | file_path (5 to 114 chars)
---|---|---
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.make_batched_features_dataset()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MakeBatchedFeaturesDatasetTest(
reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase):
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
# Basic test: read from file 0.
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[0],
label_key="label",
num_epochs=num_epochs,
batch_size=batch_size))
self.verify_records(
batch_size, 0, num_epochs=num_epochs, label_key_provided=True)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(label_key_provided=True)
# Basic test: read from file 1.
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[1],
label_key="label",
num_epochs=num_epochs,
batch_size=batch_size))
self.verify_records(
batch_size, 1, num_epochs=num_epochs, label_key_provided=True)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(label_key_provided=True)
# Basic test: read from both files.
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames,
label_key="label",
num_epochs=num_epochs,
batch_size=batch_size))
self.verify_records(
batch_size, num_epochs=num_epochs, label_key_provided=True)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(label_key_provided=True)
# Basic test: read from both files.
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size))
self.verify_records(batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch()
def testReadWithEquivalentDataset(self):
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (
core_readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
next_element = self.getNext(dataset)
for file_batch, _, _, _, record_batch, _ in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = self.evaluate(next_element())
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testReadWithFusedShuffleRepeatDataset(self):
num_epochs = 5
total_records = num_epochs * self._num_records
for batch_size in [1, 2]:
# Test that shuffling with same seed produces the same result.
outputs1 = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5))
outputs2 = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5))
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1)
batch2 = self._run_actual_batch(outputs2)
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
# Test that shuffling with different seeds produces a different order.
outputs1 = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5))
outputs2 = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=15))
all_equal = True
for _ in range(total_records // batch_size):
batch1 = self._run_actual_batch(outputs1)
batch2 = self._run_actual_batch(outputs2)
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
def testParallelReadersAndParsers(self):
num_epochs = 5
for batch_size in [1, 2]:
for reader_num_threads in [2, 4]:
for parser_num_threads in [2, 4]:
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames,
label_key="label",
num_epochs=num_epochs,
batch_size=batch_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads))
self.verify_records(
batch_size,
num_epochs=num_epochs,
label_key_provided=True,
interleave_cycle_length=reader_num_threads)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(label_key_provided=True)
self.outputs = self.getNext(
self.make_batch_feature(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
reader_num_threads=reader_num_threads,
parser_num_threads=parser_num_threads))
self.verify_records(
batch_size,
num_epochs=num_epochs,
interleave_cycle_length=reader_num_threads)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch()
def testDropFinalBatch(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default():
# Basic test: read from file 0.
outputs = self.make_batch_feature(
filenames=self.test_filenames[0],
label_key="label",
num_epochs=num_epochs,
batch_size=batch_size,
drop_final_batch=True)
for tensor in nest.flatten(outputs):
if isinstance(tensor, ops.Tensor): # Guard against SparseTensor.
self.assertEqual(tensor.shape[0], batch_size)
def testIndefiniteRepeatShapeInference(self):
dataset = self.make_batch_feature(
filenames=self.test_filenames[0],
label_key="label",
num_epochs=None,
batch_size=32)
for shape, clazz in zip(
nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)),
nest.flatten(dataset_ops.get_legacy_output_classes(dataset))):
if issubclass(clazz, ops.Tensor):
self.assertEqual(32, shape[0])
def testOldStyleReader(self):
with self.assertRaisesRegexp(
TypeError, r"The `reader` argument must return a `Dataset` object. "
r"`tf.ReaderBase` subclasses are not supported."):
_ = readers.make_batched_features_dataset(
file_pattern=self.test_filenames[0], batch_size=32,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string),
"label": parsing_ops.FixedLenFeature([], dtypes.string),
},
reader=io_ops.TFRecordReader)
if __name__ == "__main__":
test.main()
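# Illustrative usage sketch (not part of the original test file): a minimal
# example, under assumed file paths and feature names, of how
# `tf.data.experimental.make_batched_features_dataset` is typically called
# from user code.
import tensorflow as tf

def _example_make_batched_features_usage():
  dataset = tf.data.experimental.make_batched_features_dataset(
      file_pattern="/tmp/data/*.tfrecord",  # hypothetical path
      batch_size=32,
      features={
          "record": tf.io.FixedLenFeature([], tf.int64),
          "label": tf.io.FixedLenFeature([], tf.string),
      },
      label_key="label",
      num_epochs=1,
      shuffle=False)
  # With `label_key` set, each element is a (features_dict, labels) pair.
  for feature_batch, label_batch in dataset.take(1):
    print(feature_batch["record"], label_batch)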
| tensorflow-master | tensorflow/python/data/experimental/kernel_tests/make_batched_features_dataset_test.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_AutoShardDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def chunk(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
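# For example (illustrative, not part of the original file):
#   list(chunk([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]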
@test_util.run_all_in_graph_and_eager_modes
class AutoShardDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self.test_filenames = self._createFiles()
def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
num_examples, shuffle):
if shuffle:
actual = []
next_fn = self.getNext(dataset)
for _ in range(num_examples):
elem = self.evaluate(next_fn())
if isinstance(elem, tuple):
actual.extend(elem)
else:
actual.extend(elem.tolist())
self.assertCountEqual(actual, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_fn())
else:
self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
@parameterized.parameters(True, False)
def testFlatMapReaderPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (3, 8)
for r in range(0, 10)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
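# For reference (illustrative sketch, not part of the original test): with 5
# workers, file-level auto-sharding of this pipeline behaves roughly like the
# manual form
#   d = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
#   d = d.shard(num_shards=5, index=3).flat_map(core_readers.TFRecordDataset)
#   d = d.batch(5)
# which is why worker 3 sees only files 3 and 8 of the 10 test files.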
def testZipReaderPipeline(self):
dataset1 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=False)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=False)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
(b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f)) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProduces(dataset, expected)
@parameterized.parameters(True, False)
def testConcatenateReaderPipeline(self, shuffle):
dataset1 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset1 = dataset1.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset1 = dataset1.batch(5)
dataset2 = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset2 = dataset2.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset2 = dataset2.batch(5)
dataset = dataset1.concatenate(dataset2)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
expected += expected
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
@parameterized.parameters(True, False)
def testPipelineWithMap(self, shuffle):
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
def testDirectFilenameTFRecordReaderPipeline(self):
dataset = core_readers.TFRecordDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
@parameterized.parameters(True, False)
def testValidPipelineWithRangeDataset(self, shuffle):
dataset = dataset_ops.Dataset.range(self._num_files)
dataset = dataset.map(lambda n: string_ops.string_join( # pylint:disable=g-long-lambda
[self.get_temp_dir(),
string_ops.string_format("/tf_record.{}.txt", [n])]))
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
@parameterized.parameters((1, 0, 10, 10), (2, 1, 20, 5), (10, 1, 1, 10))
def testStandardReaderPipeline(self, num_epochs, index, batch_size,
parallel_reads):
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=None,
num_parallel_reads=parallel_reads,
drop_final_batch=True,
shuffle=False)
dataset = distribute._AutoShardDataset(dataset, 2, index)
outputs = self.getNext(dataset)
self._verify_records(
outputs,
batch_size=batch_size,
file_index=[i for i in range(index, self._num_records, 2)],
num_epochs=num_epochs,
interleave_cycle_length=parallel_reads,
drop_final_batch=True,
use_parser_fn=None)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(outputs())
@parameterized.parameters(True, False)
def testSampleResNetPipeline(self, shuffle):
dataset = dataset_ops.Dataset.list_files(
self.test_filenames, shuffle=shuffle)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 5, 3)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for r in range(0, 10)
for f in (3, 8)
]
self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
def testWorkersGreaterThanNumFiles(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames)
dataset = dataset.apply(
interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 500, 499)
self.assertDatasetProduces(dataset, [])
def testTFRecordReaderWithDirectFileNames(self):
# Using `_TFRecordDataset` directly creates the raw reader op rather than
# automatically wrapping it in a flat_map.
dataset = core_readers._TFRecordDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
# Using `_TFRecordDataset` directly creates the raw reader op rather than
# automatically wrapping it in a flat_map.
dataset = core_readers._TFRecordDataset(self.test_filenames)
# BatchDataset contains `output_types` and `output_shapes`
dataset = dataset.batch(5)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 5)
]
self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
def testShardOutOfRange(self):
dataset = dataset_ops.Dataset.range(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
def testShardOutOfRangeEmptyDataset(self):
dataset = dataset_ops.Dataset.range(0)
with self.assertRaises(errors.OutOfRangeError):
dataset = distribute._AutoShardDataset(dataset, 10, 0)
self.evaluate(self.getNext(dataset)())
def testNoReaderPipelines(self):
dataset = dataset_ops.Dataset.range(1024)
dataset = distribute._AutoShardDataset(dataset, 2, 0)
self.assertDatasetProduces(dataset, [i for i in range(1024) if i % 2 == 0])
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames, shuffle=False)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.apply(unique.unique())
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in (0, 5)
]
self.assertDatasetProduces(dataset, expected)
def testInvalidWorkerIndex(self):
dataset = dataset_ops.Dataset.list_files(self.test_filenames)
dataset = dataset.flat_map(core_readers.TFRecordDataset)
dataset = dataset.batch(5)
with self.assertRaises(errors.InvalidArgumentError):
dataset = distribute._AutoShardDataset(dataset, 2, 2)
self.evaluate(self.getNext(dataset)())
@test_util.run_all_in_graph_and_eager_modes
class AutoShardTextLineDatasetTest(
reader_dataset_ops_test_base.TextLineDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(AutoShardTextLineDatasetTest, self).setUp()
self._num_files = 10
self._num_records = 10
self.test_filenames = self._createFiles(self._num_files, self._num_records)
def testDirectFilenameTextLineReaderPipeline(self):
dataset = core_readers.TextLineDataset(self.test_filenames)
dataset = distribute._AutoShardDataset(dataset, 5, 0)
expected = [
b"%d: %d" % (f, r) # pylint:disable=g-complex-comprehension
for f in (0, 5)
for r in range(0, 10)
]
self.assertDatasetProduces(dataset, expected)
if __name__ == "__main__":
test.main()
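# Illustrative sketch (not part of the original test file): user code normally
# requests auto-sharding through dataset options rather than the private
# `_AutoShardDataset` transformation. This reflects the public TF 2.x API as I
# understand it; the file pattern below is hypothetical.
import tensorflow as tf

def _example_auto_shard_options():
  options = tf.data.Options()
  options.experimental_distribute.auto_shard_policy = (
      tf.data.experimental.AutoShardPolicy.FILE)
  dataset = tf.data.TFRecordDataset(tf.io.gfile.glob("/tmp/data/*.tfrecord"))
  # The options only record the desired policy; tf.distribute applies it when
  # the dataset is distributed across workers.
  return dataset.batch(5).with_options(options)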
| tensorflow-master | tensorflow/python/data/experimental/kernel_tests/auto_shard_dataset_test.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.CsvDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class CsvDatasetTest(test_base.DatasetTestBase):
def _setup_files(self, inputs, linebreak='\n', compression_type=None):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), 'temp_%d.csv' % i)
contents = linebreak.join(ip).encode('utf-8')
if compression_type is None:
with open(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'GZIP':
with gzip.GzipFile(fn, 'wb') as f:
f.write(contents)
elif compression_type == 'ZLIB':
contents = zlib.compress(contents)
with open(fn, 'wb') as f:
f.write(contents)
else:
raise ValueError('Unsupported compression_type', compression_type)
filenames.append(fn)
return filenames
def _make_test_datasets(self, inputs, **kwargs):
# Test by comparing its output to what we could get with map->decode_csv
filenames = self._setup_files(inputs)
dataset_expected = core_readers.TextLineDataset(filenames)
dataset_expected = dataset_expected.map(
lambda l: parsing_ops.decode_csv(l, **kwargs))
dataset_actual = readers.CsvDataset(filenames, **kwargs)
return (dataset_actual, dataset_expected)
def _test_by_comparison(self, inputs, **kwargs):
"""Checks that CsvDataset is equiv to TextLineDataset->map(decode_csv)."""
dataset_actual, dataset_expected = self._make_test_datasets(
inputs, **kwargs)
self.assertDatasetsEqual(dataset_actual, dataset_expected)
def _verify_output_or_err(self,
dataset,
expected_output=None,
expected_err_re=None):
if expected_err_re is None:
# Verify that output is expected, without errors
nxt = self.getNext(dataset)
expected_output = [[
v.encode('utf-8') if isinstance(v, str) else v for v in op
] for op in expected_output]
for value in expected_output:
op = self.evaluate(nxt())
self.assertAllEqual(op, value)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(nxt())
else:
nxt = self.getNext(dataset)
while True:
try:
self.evaluate(nxt())
except errors.OutOfRangeError:
break
def _test_dataset(
self,
inputs,
expected_output=None,
expected_err_re=None,
linebreak='\n',
compression_type=None, # Used for both setup and parsing
**kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
filenames = self._setup_files(inputs, linebreak, compression_type)
kwargs['compression_type'] = compression_type
if expected_err_re is not None:
# Verify that OpError is produced as expected
with self.assertRaisesOpError(expected_err_re):
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
else:
dataset = readers.CsvDataset(filenames, **kwargs)
self._verify_output_or_err(dataset, expected_output, expected_err_re)
def testCsvDataset_requiredFields(self):
record_defaults = [[]] * 4
inputs = [['1,2,3,4']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
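# Illustrative sketch (not part of the original file): the helpers above
# exercise the public API, which in user code looks roughly like
#   dataset = tf.data.experimental.CsvDataset(
#       ["/tmp/example.csv"], record_defaults=[tf.int32] * 4)
#   for row in dataset:  # each element is a tuple of four scalar tensors
#       ...
# where the path is hypothetical.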
def testCsvDataset_int(self):
record_defaults = [[0]] * 4
inputs = [['1,2,3,4', '5,6,7,8']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_float(self):
record_defaults = [[0.0]] * 4
inputs = [['1.0,2.1,3.2,4.3', '5.4,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_string(self):
record_defaults = [['']] * 4
inputs = [['1.0,2.1,hello,4.3', '5.4,6.5,goodbye,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withEmptyFields(self):
record_defaults = [[0]] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
def testCsvDataset_errWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_dataset(
inputs,
expected_err_re='Unquoted fields cannot have quotes inside',
record_defaults=record_defaults)
def testCsvDataset_errWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['"a"b","c","d"']]
self._test_dataset(
inputs,
expected_err_re=
'Quote inside a string has to be escaped by another quote',
record_defaults=record_defaults)
def testCsvDataset_ignoreErrWithUnescapedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,"2"3",4', '1,"2"3",4",5,5', 'a,b,"c"d"', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_ignoreErrWithUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4', 'a,b,c"d', '9,8"7,6,5', 'e,f,g']]
filenames = self._setup_files(inputs)
dataset = readers.CsvDataset(filenames, record_defaults=record_defaults)
dataset = dataset.apply(error_ops.ignore_errors())
self._verify_output_or_err(dataset, [['e', 'f', 'g']])
def testCsvDataset_withNoQuoteDelimAndUnquotedQuotes(self):
record_defaults = [['']] * 3
inputs = [['1,2"3,4']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_mixedTypes(self):
record_defaults = [
constant_op.constant([], dtype=dtypes.int32),
constant_op.constant([], dtype=dtypes.float32),
constant_op.constant([], dtype=dtypes.string),
constant_op.constant([], dtype=dtypes.float64)
]
inputs = [['1,2.1,3.2,4.3', '5,6.5,7.6,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withUseQuoteDelimFalse(self):
record_defaults = [['']] * 4
inputs = [['1,2,"3,4"', '"5,6",7,8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, use_quote_delim=False)
def testCsvDataset_withFieldDelim(self):
record_defaults = [[0]] * 4
inputs = [['1:2:3:4', '5:6:7:8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, field_delim=':')
def testCsvDataset_withNaValue(self):
record_defaults = [[0]] * 4
inputs = [['1,NA,3,4', 'NA,6,7,8']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, na_value='NA')
def testCsvDataset_withSelectCols(self):
record_defaults = [['']] * 2
inputs = [['1,2,3,4', '"5","6","7","8"']]
self._test_by_comparison(
inputs, record_defaults=record_defaults, select_cols=[1, 2])
def testCsvDataset_withSelectColsTooHigh(self):
record_defaults = [[0]] * 2
inputs = [['1,2,3,4', '5,6,7,8']]
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have 1 in record',
record_defaults=record_defaults,
select_cols=[3, 4])
def testCsvDataset_withOneCol(self):
record_defaults = [['NA']]
inputs = [['0', '', '2']]
self._test_dataset(
inputs, [['0'], ['NA'], ['2']], record_defaults=record_defaults)
def testCsvDataset_withMultipleFiles(self):
record_defaults = [[0]] * 4
inputs = [['1,2,3,4', '5,6,7,8'], ['5,6,7,8']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withLeadingAndTrailingSpaces(self):
record_defaults = [[0.0]] * 4
inputs = [['0, 1, 2, 3']]
expected = [[0.0, 1.0, 2.0, 3.0]]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_errorWithMissingDefault(self):
record_defaults = [[]] * 2
inputs = [['0,']]
self._test_dataset(
inputs,
expected_err_re='Field 1 is required but missing in record!',
record_defaults=record_defaults)
def testCsvDataset_errorWithFewerDefaultsThanFields(self):
record_defaults = [[0.0]] * 2
inputs = [['0,1,2,3']]
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have more in record',
record_defaults=record_defaults)
def testCsvDataset_errorWithMoreDefaultsThanFields(self):
record_defaults = [[0.0]] * 5
inputs = [['0,1,2,3']]
self._test_dataset(
inputs,
expected_err_re='Expect 5 fields but have 4 in record',
record_defaults=record_defaults)
def testCsvDataset_withHeader(self):
record_defaults = [[0]] * 2
inputs = [['col1,col2', '1,2']]
expected = [[1, 2]]
self._test_dataset(
inputs,
expected,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_withHeaderAndNoRecords(self):
record_defaults = [[0]] * 2
inputs = [['col1,col2']]
expected = []
self._test_dataset(
inputs,
expected,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_errorWithHeaderEmptyFile(self):
record_defaults = [[0]] * 2
inputs = [[]]
expected_err_re = "Can't read header of file"
self._test_dataset(
inputs,
expected_err_re=expected_err_re,
record_defaults=record_defaults,
header=True,
)
def testCsvDataset_withEmptyFile(self):
record_defaults = [['']] * 2
inputs = [['']] # Empty file
self._test_dataset(
inputs, expected_output=[], record_defaults=record_defaults)
def testCsvDataset_errorWithEmptyRecord(self):
record_defaults = [['']] * 2
inputs = [['', '1,2']] # First record is empty
self._test_dataset(
inputs,
expected_err_re='Expect 2 fields but have 1 in record',
record_defaults=record_defaults)
def testCsvDataset_withChainedOps(self):
# Test that one dataset can create multiple iterators correctly.
# `repeat` creates multiple iterators from the same C++ Dataset.
record_defaults = [[0]] * 4
inputs = [['1,,3,4', '5,6,,8']]
ds_actual, ds_expected = self._make_test_datasets(
inputs, record_defaults=record_defaults)
self.assertDatasetsEqual(
ds_actual.repeat(5).prefetch(1),
ds_expected.repeat(5).prefetch(1))
def testCsvDataset_withTypeDefaults(self):
# Testing using dtypes as record_defaults for required fields
record_defaults = [dtypes.float32, [0.0]]
inputs = [['1.0,2.0', '3.0,4.0']]
self._test_dataset(
inputs,
[[1.0, 2.0], [3.0, 4.0]],
record_defaults=record_defaults,
)
def testMakeCsvDataset_fieldOrder(self):
data = [[
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19',
'1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19'
]]
file_path = self._setup_files(data)
ds = readers.make_csv_dataset(
file_path, batch_size=1, shuffle=False, num_epochs=1)
nxt = self.getNext(ds)
result = list(self.evaluate(nxt()).values())
self.assertEqual(result, sorted(result))
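# For reference (illustrative, not part of the original file): in user code
# this reader is usually created along the lines of
#   ds = tf.data.experimental.make_csv_dataset(
#       "/tmp/*.csv", batch_size=1, shuffle=False, num_epochs=1)
# with a hypothetical file pattern; the assertion above checks that column
# values come back in the original field order.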
## The following tests exercise parsing logic for quoted fields
def testCsvDataset_withQuoted(self):
record_defaults = [['']] * 4
inputs = [['"a","b","c :)","d"', '"e","f","g :(","h"']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
def testCsvDataset_withOneColAndQuotes(self):
record_defaults = [['']]
inputs = [['"0"', '"1"', '"2"']]
self._test_dataset(
inputs, [['0'], ['1'], ['2']], record_defaults=record_defaults)
def testCsvDataset_withNewLine(self):
# In this case, we expect it to behave differently from
# TextLineDataset->map(decode_csv) since that flow has bugs
record_defaults = [['']] * 4
inputs = [['a,b,"""c""\n0","d\ne"', 'f,g,h,i']]
expected = [['a', 'b', '"c"\n0', 'd\ne'], ['f', 'g', 'h', 'i']]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_withNewLineInUnselectedCol(self):
record_defaults = [['']]
inputs = [['1,"2\n3",4', '5,6,7']]
self._test_dataset(
inputs,
expected_output=[['1'], ['5']],
record_defaults=record_defaults,
select_cols=[0])
def testCsvDataset_withMultipleNewLines(self):
# In this case, we expect it to behave differently from
# TextLineDataset->map(decode_csv) since that flow has bugs
record_defaults = [['']] * 4
inputs = [['a,"b\n\nx","""c""\n \n0","d\ne"', 'f,g,h,i']]
expected = [['a', 'b\n\nx', '"c"\n \n0', 'd\ne'], ['f', 'g', 'h', 'i']]
self._test_dataset(inputs, expected, record_defaults=record_defaults)
def testCsvDataset_errorWithTerminateMidRecord(self):
record_defaults = [['']] * 4
inputs = [['a,b,c,"a']]
self._test_dataset(
inputs,
expected_err_re=
'Reached end of file without closing quoted field in record',
record_defaults=record_defaults)
def testCsvDataset_withEscapedQuotes(self):
record_defaults = [['']] * 4
inputs = [['1.0,2.1,"she said: ""hello""",4.3', '5.4,6.5,goodbye,8.7']]
self._test_by_comparison(inputs, record_defaults=record_defaults)
## Testing that parsing works with all buffer sizes, quoted/unquoted fields,
## and different types of line breaks
def testCsvDataset_withInvalidBufferSize(self):
record_defaults = [['']] * 4
inputs = [['a,b,c,d']]
self._test_dataset(
inputs,
expected_err_re='buffer_size should be positive',
record_defaults=record_defaults,
buffer_size=0)
def _test_dataset_on_buffer_sizes(self,
inputs,
expected,
linebreak,
record_defaults,
compression_type=None,
num_sizes_to_test=20):
# Testing reading with a range of buffer sizes that should all work.
for i in list(range(1, 1 + num_sizes_to_test)) + [None]:
self._test_dataset(
inputs,
expected,
linebreak=linebreak,
compression_type=compression_type,
record_defaults=record_defaults,
buffer_size=i)
def testCsvDataset_withLF(self):
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCR(self):
# Test that when the line separator is '\r', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLF(self):
# Test that when the line separator is '\r\n', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['abc,def,ghi', '0,1,2', ',,']]
expected = [['abc', 'def', 'ghi'], ['0', '1', '2'], ['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
def testCsvDataset_withBufferSizeAndQuoted(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\n', record_defaults=record_defaults)
def testCsvDataset_withCRAndQuoted(self):
# Test that when the line separator is '\r', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r', record_defaults=record_defaults)
def testCsvDataset_withCRLFAndQuoted(self):
# Test that when the line separator is '\r\n', parsing works with all buffer
# sizes
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs, expected, linebreak='\r\n', record_defaults=record_defaults)
def testCsvDataset_withGzipCompressionType(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs,
expected,
linebreak='\r\n',
compression_type='GZIP',
record_defaults=record_defaults)
def testCsvDataset_withZlibCompressionType(self):
record_defaults = [['NA']] * 3
inputs = [['"\n\n\n","\r\r\r","abc"', '"0","1","2"', '"","",""']]
expected = [['\n\n\n', '\r\r\r', 'abc'], ['0', '1', '2'],
['NA', 'NA', 'NA']]
self._test_dataset_on_buffer_sizes(
inputs,
expected,
linebreak='\r\n',
compression_type='ZLIB',
record_defaults=record_defaults)
def testCsvDataset_withScalarDefaults(self):
record_defaults = [constant_op.constant(0, dtype=dtypes.int64)] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
def testCsvDataset_with2DDefaults(self):
record_defaults = [constant_op.constant([[0]], dtype=dtypes.int64)] * 4
inputs = [[',,,', '1,1,1,', ',2,2,2']]
if context.executing_eagerly():
err_spec = errors.InvalidArgumentError, (
'Each record default should be at '
'most rank 1.')
else:
err_spec = ValueError, 'Shape must be at most rank 1 but is rank 2'
with self.assertRaisesWithPredicateMatch(*err_spec):
self._test_dataset(
inputs, [[0, 0, 0, 0], [1, 1, 1, 0], [0, 2, 2, 2]],
record_defaults=record_defaults)
if __name__ == '__main__':
test.main()
| tensorflow-master | tensorflow/python/data/experimental/kernel_tests/csv_dataset_test.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing the input pipeline statistics gathering ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import tf2
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import gfile
class StatsDatasetTestBase(test_base.DatasetTestBase):
"""Base class for testing statistics gathered in `StatsAggregator`."""
@classmethod
def setUpClass(cls):
if tf2.enabled():
stats_aggregator._DEFAULT_MAX_QUEUE = 0 # pylint: disable=protected-access
stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV2
# TODO(b/116314787): add graph mode support for StatsAggregatorV2.
else:
stats_aggregator.StatsAggregator = stats_aggregator.StatsAggregatorV1
return test_util.run_all_in_graph_and_eager_modes(cls)
def datasetExperimentalStats(self,
dataset,
aggregator,
prefix="",
counter_prefix=""):
options = dataset_ops.Options()
options.experimental_stats.aggregator = aggregator
options.experimental_stats.prefix = prefix
options.experimental_stats.counter_prefix = counter_prefix
options.experimental_stats.latency_all_edges = False
return dataset.with_options(options)
def regexForNodeName(self, op_name, stats_type=""):
if stats_type:
return "".join([op_name, r"/_\d+::", stats_type])
return "".join([op_name, r"/_\d+"])
def assertStatisticsContains(self, handle, tag, num_events=-1, offset=0):
if tf2.enabled():
self._assertEventContains(handle, tag, num_events, offset)
else:
self._assertSummaryContains(handle, tag)
def assertStatisticsHasCount(self,
handle,
tag,
count,
num_events=-1,
greater_than=False,
offset=0):
if tf2.enabled():
self._assertEventHasCount(handle, tag, count, num_events, greater_than,
offset)
else:
self._assertSummaryHasCount(handle, tag, count, greater_than)
def assertStatisticsHasSum(self,
handle,
tag,
expected_value,
num_events=-1,
offset=0):
if tf2.enabled():
self._assertEventHasSum(handle, tag, expected_value, num_events, offset)
else:
self._assertSummaryHasSum(handle, tag, expected_value)
def assertStatisticsHasScalarValue(self,
handle,
tag,
expected_value,
num_events=-1,
offset=0):
if tf2.enabled():
self._assertEventHasScalarValue(handle, tag, expected_value, num_events,
offset)
else:
self._assertSummaryHasScalarValue(handle, tag, expected_value)
def assertStatisticsHasRange(self,
handle,
tag,
min_value,
max_value,
num_events=-1,
offset=0):
if tf2.enabled():
self._assertEventHasRange(handle, tag, min_value, max_value, num_events,
offset)
else:
self._assertSummaryHasRange(handle, tag, min_value, max_value)
def _assertSummaryContains(self, summary_str, tag):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summary_str)
for value in summary_proto.value:
if re.match(tag, value.tag):
return
self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
def _assertSummaryHasCount(self,
summary_str,
tag,
expected_value,
greater_than=False):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summary_str)
for value in summary_proto.value:
if re.match(tag, value.tag):
if greater_than:
self.assertGreaterEqual(value.histo.num, expected_value)
else:
self.assertEqual(expected_value, value.histo.num)
return
self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
def _assertSummaryHasRange(self, summary_str, tag, min_value, max_value):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summary_str)
for value in summary_proto.value:
if re.match(tag, value.tag):
self.assertLessEqual(min_value, value.histo.min)
self.assertGreaterEqual(max_value, value.histo.max)
return
self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
def _assertSummaryHasSum(self, summary_str, tag, expected_value):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summary_str)
for value in summary_proto.value:
if re.match(tag, value.tag):
self.assertEqual(expected_value, value.histo.sum)
return
self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
def _assertSummaryHasScalarValue(self, summary_str, tag, expected_value):
summary_proto = summary_pb2.Summary()
summary_proto.ParseFromString(summary_str)
for value in summary_proto.value:
if re.match(tag, value.tag):
self.assertEqual(expected_value, value.simple_value)
return
self.fail("Expected tag %r not found in summary %r" % (tag, summary_proto))
# TODO(b/116314787): add tests to check the correctness of steps as well.
def _assertEventContains(self, logdir, tag, num_events, offset):
events = _events_from_logdir(logdir)
if num_events == -1:
self.assertGreater(len(events), 1)
for event in events[::-1]:
if re.match(tag, event.summary.value[0].tag):
return
self.fail("Expected tag %r not found in event file in %r" % (tag, logdir))
else:
self.assertEqual(len(events), num_events)
self.assertTrue(
re.match(tag, events[num_events - offset - 1].summary.value[0].tag))
def _assertEventHasCount(self, logdir, tag, count, num_events, greater_than,
offset):
events = _events_from_logdir(logdir)
if num_events == -1:
self.assertGreater(len(events), 1)
for event in events[::-1]:
if re.match(tag, event.summary.value[0].tag):
if greater_than:
self.assertGreaterEqual(event.summary.value[0].histo.num, count)
else:
self.assertEqual(count, event.summary.value[0].histo.num)
return
self.fail("Expected tag %r not found in event file in %r" % (tag, logdir))
else:
self.assertEqual(len(events), num_events)
self.assertTrue(
re.match(tag, events[num_events - offset - 1].summary.value[0].tag))
if greater_than:
self.assertGreaterEqual(
events[num_events - offset - 1].summary.value[0].histo.num, count)
else:
self.assertEqual(
events[num_events - offset - 1].summary.value[0].histo.num, count)
def _assertEventHasSum(self, logdir, tag, expected_value, num_events, offset):
events = _events_from_logdir(logdir)
if num_events == -1:
self.assertGreater(len(events), 1)
for event in events[::-1]:
if re.match(tag, event.summary.value[0].tag):
self.assertEqual(expected_value, event.summary.value[0].histo.sum)
return
self.fail("Expected tag %r not found in event file in %r" % (tag, logdir))
else:
self.assertEqual(len(events), num_events)
self.assertTrue(
re.match(tag, events[num_events - offset - 1].summary.value[0].tag))
self.assertEqual(
events[num_events - offset - 1].summary.value[0].histo.sum,
expected_value)
def _assertEventHasRange(self, logdir, tag, min_value, max_value, num_events,
offset):
events = _events_from_logdir(logdir)
if num_events == -1:
self.assertGreater(len(events), 1)
for event in events[::-1]:
if re.match(tag, event.summary.value[0].tag):
self.assertLessEqual(min_value, event.summary.value[0].histo.min)
self.assertGreaterEqual(max_value, event.summary.value[0].histo.max)
return
self.fail("Expected tag %r not found in event file in %r" % (tag, logdir))
else:
self.assertEqual(len(events), num_events)
self.assertTrue(
re.match(tag, events[num_events - offset - 1].summary.value[0].tag))
self.assertLessEqual(
min_value, events[num_events - offset - 1].summary.value[0].histo.min)
self.assertGreaterEqual(
max_value, events[num_events - offset - 1].summary.value[0].histo.max)
def _assertEventHasScalarValue(self, logdir, tag, expected_value, num_events,
offset):
events = _events_from_logdir(logdir)
if num_events == -1:
self.assertGreater(len(events), 1)
for event in events[::-1]:
if re.match(tag, event.summary.value[0].tag):
self.assertEqual(expected_value, event.summary.value[0].simple_value)
return
self.fail("Expected tag %r not found in event file in %r" % (tag, logdir))
else:
self.assertEqual(len(events), num_events)
self.assertTrue(
re.match(tag, events[num_events - offset - 1].summary.value[0].tag))
self.assertLessEqual(
expected_value,
events[num_events - offset - 1].summary.value[0].simple_value)
def getHandle(self, aggregator):
# pylint: disable=protected-access
if isinstance(aggregator, stats_aggregator.StatsAggregatorV1):
return self.evaluate(aggregator.get_summary())
assert isinstance(aggregator, (stats_aggregator.StatsAggregatorV2))
return aggregator._logdir
def parallelCallsStats(self,
dataset_fn,
dataset_names,
num_output,
function_processing_time=False,
check_elements=True):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_fn()
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(num_output):
next_ = self.evaluate(next_element())
if check_elements:
self.assertAllEqual(np.array([i] * i, dtype=np.int64), next_)
handle = self.getHandle(aggregator)
for dataset_name in dataset_names:
if function_processing_time:
self.assertStatisticsHasCount(
handle, r"(.*)::execution_time$", float(i + 1), greater_than=True)
self.assertStatisticsContains(
handle, self.regexForNodeName(dataset_name, "thread_utilization"))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
if function_processing_time:
handle = self.getHandle(aggregator)
for dataset_name in dataset_names:
self.assertStatisticsHasCount(
handle,
r"(.*)::execution_time$",
float(num_output),
greater_than=True)
# Adding these two methods from summary_test_util, as summary_test_util is in
# contrib.
def _events_from_file(filepath):
"""Returns all events in a single event file.
Args:
filepath: Path to the event file.
Returns:
A list of all tf.Event protos in the event file.
"""
records = list(tf_record.tf_record_iterator(filepath))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def _events_from_logdir(logdir):
"""Returns all events in the single eventfile in logdir.
Args:
logdir: The directory in which the single event file is sought.
Returns:
A list of all tf.Event protos from the single event file.
Raises:
AssertionError: If logdir does not contain exactly one file.
"""
assert gfile.Exists(logdir)
files = gfile.ListDirectory(logdir)
assert len(files) == 1, "Expected exactly one file in logdir, found: %s" % files
return _events_from_file(os.path.join(logdir, files[0]))
| tensorflow-master | tensorflow/python/data/experimental/kernel_tests/stats_dataset_test_base.py |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.parse_example_dataset()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import parsing_ops as contrib_parsing_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
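# For example (illustrative, not part of the original file):
#   example(features=features({"age": int64_feature([42])}))
# builds a complete tf.train.Example proto with a single int64 feature, which
# is what the tests below serialize and parse.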
@test_util.run_all_in_graph_and_eager_modes
class ParseExampleDatasetTest(test_base.DatasetTestBase):
def _compare_output_to_expected(self, dict_tensors, expected_tensors):
self.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
for k, v in sorted(dict_tensors.items()):
expected_v = expected_tensors[k]
if sparse_tensor.is_sparse(v):
self.assertSparseValuesEqual(expected_v, v)
else:
# One output for standard Tensor.
self.assertAllEqual(expected_v, v)
def _test(self,
input_tensor,
feature_val,
expected_values=None,
expected_err=None,
create_iterator_twice=False):
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply(
contrib_parsing_ops.parse_example_dataset(feature_val))
get_next = self.getNext(dataset)
self.evaluate(get_next())
return
else:
# Returns dict w/ Tensors and SparseTensors.
# Check values.
dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply(
contrib_parsing_ops.parse_example_dataset(feature_val))
get_next = self.getNext(dataset)
result = self.evaluate(get_next())
self._compare_output_to_expected(result, expected_values)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
if create_iterator_twice:
get_next = self.getNext(dataset)
result = self.evaluate(get_next())
self._compare_output_to_expected(result, expected_values)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
batch_size = (
self.evaluate(input_tensor).size if isinstance(input_tensor, ops.Tensor)
else np.asarray(input_tensor).size)
for k, f in feature_val.items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[0],
batch_size)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[1], None)
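# Illustrative sketch (not part of the original file): in user code the
# transformation under test is typically applied to a dataset of serialized
# tf.train.Example strings, e.g.
#   ds = tf.data.TFRecordDataset(filenames).batch(32).apply(
#       tf.data.experimental.parse_example_dataset(
#           {"x": tf.io.FixedLenFeature([], tf.int64)}))
# where `filenames` and the feature spec are hypothetical.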
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test(
ops.convert_to_tensor(["", ""]), {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
},
expected_values=expected_output,
create_iterator_twice=True)
@test_util.run_deprecated_v1
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature(
(2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
[original.SerializeToString()],
input_features,
expected_err=(errors_impl.InvalidArgumentError,
"Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
["", ""],
input_features,
expected_err=(errors_impl.InvalidArgumentError,
"Feature: c \\(data type: float\\) is required"))
@test_util.run_deprecated_v1
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})), example(features=features({
"a": float_feature([-1, -1]),
}))
]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized),
{"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)},
expected_err=(errors_impl.InvalidArgumentError,
"Key: a, Index: 1. Number of float values"))
def testDenseDefaultNoShapeShouldFail(self):
original = [example(features=features({"a": float_feature([1, 1, 3]),})),]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized),
{"a": parsing_ops.FixedLenFeature(None, dtypes.float32)},
expected_err=(ValueError, "Missing shape for feature a"))
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([]), # empty float list
})),
example(features=features({
"st_d": feature(), # feature with nothing in it
})),
example(features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test(
ops.convert_to_tensor(serialized), {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {"sp": expected_sp,}
self._test(
ops.convert_to_tensor(serialized),
{"sp": parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])},
expected_values=expected_output,
create_iterator_twice=True)
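# For reference (illustrative): SparseFeature(["idx"], "val", float32, [13])
# uses the "idx" entries as sparse column indices and the "val" entries as the
# values, so example 0 above contributes values 3.0 and 4.0 at columns 5 and
# 10 of a [batch, 13] SparseTensor.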
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32),
        np.array([2, 7], dtype=np.int64))  # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test(
ops.convert_to_tensor(serialized), {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
},
expected_values=expected_output,
create_iterator_twice=True)
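  # A list of index keys ("idx0", "idx1") combined with a multi-element size
  # yields a higher-rank `SparseTensor`; here the dense shape is
  # [batch_size, 13, 3].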
def testSerializedContaining3DSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = sparse_tensor.SparseTensorValue(
# indices
np.array([[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {"sp": expected_sp,}
self._test(
ops.convert_to_tensor(serialized), {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
},
expected_values=expected_output,
create_iterator_twice=True)
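  # A `FixedLenFeature` parses to a dense tensor of shape
  # [batch_size] + feature_shape, so the (1, 2, 1) feature below comes back
  # with shape (2, 1, 2, 1) for this batch of two examples.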
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})), example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
},
expected_values=expected_output,
create_iterator_twice=True)
  # This test is identical to the previous one except
  # for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})), example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array(
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})), example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array(
[[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test(
ops.convert_to_tensor(serialized), {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
},
expected_values=expected_output,
create_iterator_twice=True)
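  # A `FixedLenFeature` with a `default_value` is optional: examples that omit
  # the key, or supply an empty feature, are filled with the default instead
  # of raising the "is required" error exercised earlier.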
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array(
[[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2,
1),
"b":
np.array(
["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1,
1),
}
self._test(
ops.convert_to_tensor(serialized), {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64))  # batch == 2, max_elems = 13
original = [
example(features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array(
[[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
ops.convert_to_tensor(serialized),
{
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
},
expected_values=expected_output,
create_iterator_twice=True)
  def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
        np.array([2, 2], dtype=np.int64))  # batch == 2, max_elems = 2
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64))  # batch == 2, max_elems = 13
original = [
example(features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test(
ops.convert_to_tensor(serialized), {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
},
expected_values=expected_output,
create_iterator_twice=True)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Delete some intermediate entries
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
        # w.p. 25%, drop out the last remaining entry (the second if it was
        # not already dropped above, otherwise the first)
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(features=features(
{"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])}))
for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized, dtype=dtypes.string), {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
def testSerializedShapeMismatch(self):
aname = "a"
bname = "b"
cname = "c"
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
if context.executing_eagerly():
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature((2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(errors_impl.InvalidArgumentError,
"Input to reshape is a tensor with 0 values"))
else:
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature((2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
},
expected_values=expected_output,
create_iterator_twice=True)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(
errors_impl.OpError, "Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
ops.convert_to_tensor(serialized), {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.SqlDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SqlDatasetTest(sql_dataset_test_base.SqlDatasetTestBase):
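  # The queries below run against the small SQLite database that
  # `SqlDatasetTestBase` sets up with `students`, `people`, and `townspeople`
  # tables.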
# Test that SqlDataset can read from a database table.
def testReadResultSet(self):
for _ in range(2): # Run twice to verify statelessness of db operations.
dataset = self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string),
num_repeats=2)
self.assertDatasetProduces(
dataset,
expected_output=[(b"John", b"Doe", b"Hi!"),
(b"Jane", b"Moe", b"Hi again!")] * 2,
num_test_iterations=2)
# Test that SqlDataset works on a join query.
def testReadResultSetJoinQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT students.first_name, state, motto FROM students "
"INNER JOIN people "
"ON students.first_name = people.first_name "
"AND students.last_name = people.last_name",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"California", b"Hi!"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset can read a database entry with a null-terminator
# in the middle of the text and place the entry in a `string` tensor.
def testReadResultSetNullTerminator(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, favorite_nonsense_word "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"n\0nsense"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"nonsense\0"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that SqlDataset works when used on two different queries.
# Because the output types of the dataset must be determined at graph-creation
# time, the two queries must have the same number and types of columns.
def testReadResultSetReuseSqlDataset(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"Hi!"), self.evaluate(get_next()))
self.assertEqual((b"Jane", b"Moe", b"Hi again!"), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, state FROM people "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
self.assertEqual((b"John", b"Doe", b"California"),
self.evaluate(get_next()))
self.assertEqual((b"Benjamin", b"Franklin", b"Pennsylvania"),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
  # Test that an `OutOfRangeError` is raised on the first call to
  # `get_next()` if the result set is empty.
def testReadEmptyResultSet(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, motto FROM students "
"WHERE first_name = 'Nonexistent'",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that an error is raised when `driver_name` is invalid.
def testReadResultSetWithInvalidDriverName(self):
with self.assertRaises(errors.InvalidArgumentError):
dataset = self._createSqlDataset(
driver_name="sqlfake",
query="SELECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string))
self.assertDatasetProduces(dataset, expected_output=[])
  # Test that an error is raised when a column name in `query` is nonexistent.
def testReadResultSetWithInvalidColumnName(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, fake_column FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
# Test that an error is raised when there is a syntax error in `query`.
def testReadResultSetOfQueryWithSyntaxError(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELEmispellECT first_name, last_name, motto FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.UnknownError):
self.evaluate(get_next())
  # Test that an error is raised when the number of columns in `query`
  # does not match the length of `output_types`.
def testReadResultSetWithMismatchBetweenColumnsAndOutputTypes(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
  # Test that an error is raised when `query` is an insert query rather than
  # a select query: the number of output types passed to the op does not
  # match the number of columns in the result set of the query (namely, 0
  # for an insert statement).
def testReadResultSetOfInsertQuery(self):
get_next = self.getNext(
self._createSqlDataset(
query="INSERT INTO students (first_name, last_name, motto) "
"VALUES ('Foo', 'Bar', 'Baz'), ('Fizz', 'Buzz', 'Fizzbuzz')",
output_types=(dtypes.string, dtypes.string, dtypes.string)))
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int8` tensor.
def testReadResultSetInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int8, dtypes.int8)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int8` tensor.
def testReadResultSetInt8MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT desk_number, favorite_negative_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.int8, dtypes.int8)))
self.assertEqual((9, -2), self.evaluate(get_next()))
# Max and min values of int8
self.assertEqual((127, -128), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int16` tensor.
def testReadResultSetInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income, favorite_negative_number "
"FROM students "
"WHERE first_name = 'John' ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16, dtypes.int16)))
self.assertEqual((b"John", 0, -2), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int16` tensor.
def testReadResultSetInt16MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int16)))
# Max value of int16
self.assertEqual((b"John", 32767), self.evaluate(get_next()))
# Min value of int16
self.assertEqual((b"Jane", -32768), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in an `int32` tensor.
def testReadResultSetInt32(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int32` tensor.
def testReadResultSetInt32MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
# Max value of int32
self.assertEqual((b"John", 2147483647), self.evaluate(get_next()))
# Min value of int32
self.assertEqual((b"Jane", -2147483648), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a numeric `varchar` from a SQLite database
# table and place it in an `int32` tensor.
def testReadResultSetInt32VarCharColumnAsInt(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, school_id FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int32)))
self.assertEqual((b"John", 123), self.evaluate(get_next()))
self.assertEqual((b"Jane", 1000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in an `int64` tensor.
def testReadResultSetInt64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a negative or 0-valued integer from a
# SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64NegativeAndZero(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, income FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
self.assertEqual((b"John", 0), self.evaluate(get_next()))
self.assertEqual((b"Jane", -20000), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a large (positive or negative) integer from
# a SQLite database table and place it in an `int64` tensor.
def testReadResultSetInt64MaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_big_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.int64)))
# Max value of int64
self.assertEqual((b"John", 9223372036854775807), self.evaluate(get_next()))
# Min value of int64
self.assertEqual((b"Jane", -9223372036854775808), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table and
# place it in a `uint8` tensor.
def testReadResultSetUInt8(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint8 values from a
# SQLite database table and place them in `uint8` tensors.
def testReadResultSetUInt8MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, brownie_points FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint8)))
# Min value of uint8
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint8
self.assertEqual((b"Jane", 255), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer from a SQLite database table
# and place it in a `uint16` tensor.
def testReadResultSetUInt16(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, desk_number FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
self.assertEqual((b"John", 9), self.evaluate(get_next()))
self.assertEqual((b"Jane", 127), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read the minimum and maximum uint16 values from a
# SQLite database table and place them in `uint16` tensors.
def testReadResultSetUInt16MinAndMaxValues(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, account_balance FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.uint16)))
# Min value of uint16
self.assertEqual((b"John", 0), self.evaluate(get_next()))
# Max value of uint16
self.assertEqual((b"Jane", 65535), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
  # Test that `SqlDataset` can read 1-valued and 0-valued integers from a
  # SQLite database table and place them in `bool` tensors as `True` and
  # `False` respectively.
def testReadResultSetBool(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, registration_complete FROM students "
"ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", False), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read an integer that is not 0-valued or 1-valued
# from a SQLite database table and place it as `True` in a `bool` tensor.
def testReadResultSetBoolNotZeroOrOne(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, favorite_medium_sized_number "
"FROM students ORDER BY first_name DESC",
output_types=(dtypes.string, dtypes.bool)))
self.assertEqual((b"John", True), self.evaluate(get_next()))
self.assertEqual((b"Jane", True), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table
# and place it in a `float64` tensor.
def testReadResultSetFloat64(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, victories FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual((b"George", b"Washington", 20.0),
self.evaluate(get_next()))
self.assertEqual((b"John", b"Adams", -19.95), self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table beyond
# the precision of 64-bit IEEE, without throwing an error. Test that
# `SqlDataset` identifies such a value as equal to itself.
def testReadResultSetFloat64OverlyPrecise(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, accolades FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertEqual(
(b"George", b"Washington",
1331241.321342132321324589798264627463827647382647382643874),
self.evaluate(get_next()))
self.assertEqual(
(b"John", b"Adams",
1331241321342132321324589798264627463827647382647382643874.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Test that `SqlDataset` can read a float from a SQLite database table,
# representing the largest integer representable as a 64-bit IEEE float
# such that the previous integer is also representable as a 64-bit IEEE float.
# Test that `SqlDataset` can distinguish these two numbers.
def testReadResultSetFloat64LargestConsecutiveWholeNumbersNotEqual(self):
get_next = self.getNext(
self._createSqlDataset(
query="SELECT first_name, last_name, triumphs FROM townspeople "
"ORDER BY first_name",
output_types=(dtypes.string, dtypes.string, dtypes.float64)))
self.assertNotEqual((b"George", b"Washington", 9007199254740992.0),
self.evaluate(get_next()))
self.assertNotEqual((b"John", b"Adams", 9007199254740991.0),
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/sql_dataset_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `experimental_slack` option."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class PrefetchWithSlackTest(test_base.DatasetTestBase, parameterized.TestCase):
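  # The `experimental_slack` option enables the "slack" static optimization on
  # the terminal `prefetch` in the pipeline. With a `MultiDeviceIterator` the
  # slack period equals the number of devices; without one it defaults to 1,
  # and the rewrite raises an error if there is no `prefetch` or if a
  # non-passthrough transformation follows it.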
@test_util.run_v1_only("b/121264236")
def testPrefetchWithSlackOption(self):
"""Determines slack_period based on num devices attached to iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, ["/cpu:1", "/cpu:2"])
dataset = multi_device_iterator._dataset # pylint: disable=protected-access
self.assertIn("slack", dataset.options()._static_optimizations())
self.assertIn("slack:slack_period:2",
dataset.options()._static_optimization_configs())
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.test_session(config=config):
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
def testPrefetchWithSlackOptionWithoutIterator(self):
"""Defaults to slack period of 1 without iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertIn("slack", dataset.options()._static_optimizations())
self.assertIn("slack:slack_period:1",
dataset.options()._static_optimization_configs())
self.assertDatasetProduces(dataset, range(10))
def testWithPassthroughDataset(self):
"""Should still work with a passthrough dataset after prefetch()."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.map(lambda x: x + 1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(1, 11))
def testErrorWithoutPrefetch(self):
"""The rewrite fails if there is no prefetch() in the pipeline."""
dataset = dataset_ops.Dataset.range(10)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
def testErrorWithInvalidDataset(self):
"""With a nested dataset op after prefetch, the rewrite should fail."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
if __name__ == "__main__":
ops.enable_eager_execution(
config=config_pb2.ConfigProto(device_count={"CPU": 3}))
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.dense_to_sparse_batch()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DenseToSparseBatchTest(test_base.DatasetTestBase):
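  # `dense_to_sparse_batch(batch_size, row_shape)` batches variable-size dense
  # elements into one `SparseTensor` per batch with dense shape
  # [actual_batch_size] + row_shape, sizing any `None` dimension to the
  # largest element in that batch.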
def testDenseToSparseBatchDataset(self):
components = np.random.randint(12, size=(100,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: array_ops.fill([x], x)).apply(
batching.dense_to_sparse_batch(4, [12]))
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j]
for i, c in enumerate(components[start:start + 4])
for j in range(c)], results.indices)
self.assertAllEqual(
[c for c in components[start:start + 4] for _ in range(c)],
results.values)
self.assertAllEqual([min(4,
len(components) - start), 12],
results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testDenseToSparseBatchDatasetWithUnknownShape(self):
components = np.random.randint(5, size=(40,)).astype(np.int32)
dataset = dataset_ops.Dataset.from_tensor_slices(
components).map(lambda x: array_ops.fill([x, x], x)).apply(
batching.dense_to_sparse_batch(4, [5, None]))
get_next = self.getNext(dataset)
for start in range(0, len(components), 4):
results = self.evaluate(get_next())
self.assertAllEqual([[i, j, z]
for i, c in enumerate(components[start:start + 4])
for j in range(c)
for z in range(c)], results.indices)
self.assertAllEqual([
c for c in components[start:start + 4] for _ in range(c)
for _ in range(c)
], results.values)
self.assertAllEqual([
min(4,
len(components) - start), 5,
np.max(components[start:start + 4])
], results.dense_shape)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testDenseToSparseBatchDatasetWithInvalidShape(self):
input_tensor = array_ops.constant([[1]])
with self.assertRaisesRegexp(ValueError, "Dimension -2 must be >= 0"):
dataset_ops.Dataset.from_tensors(input_tensor).apply(
batching.dense_to_sparse_batch(4, [-2]))
def testDenseToSparseBatchDatasetShapeErrors(self):
def dataset_fn(input_tensor):
return dataset_ops.Dataset.from_tensors(input_tensor).apply(
batching.dense_to_sparse_batch(4, [12]))
# Initialize with an input tensor of incompatible rank.
get_next = self.getNext(dataset_fn([[1]]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"incompatible with the row shape"):
self.evaluate(get_next())
# Initialize with an input tensor that is larger than `row_shape`.
get_next = self.getNext(dataset_fn(np.int32(range(13))))
with self.assertRaisesRegexp(errors.DataLossError,
"larger than the row shape"):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/dense_to_sparse_batch_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Wrapping / Unwrapping dataset variants."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class WrapDatasetVariantTest(test_base.DatasetTestBase):
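  # `wrap_dataset_variant` boxes a dataset's variant tensor so it can be
  # copied to another device (see the GPU test below), and
  # `unwrap_dataset_variant` recovers a variant that `_VariantDataset` can
  # iterate over.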
def testBasic(self):
ds = dataset_ops.Dataset.range(100)
ds_variant = ds._variant_tensor # pylint: disable=protected-access
wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)
unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(wrapped_variant)
variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
ds._element_structure)
get_next = self.getNext(variant_ds, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
@test_util.run_v1_only("b/123901304")
def testSkipEagerGPU(self):
ds = dataset_ops.Dataset.range(100)
ds_variant = ds._variant_tensor # pylint: disable=protected-access
wrapped_variant = gen_dataset_ops.wrap_dataset_variant(ds_variant)
with ops.device("/gpu:0"):
gpu_wrapped_variant = array_ops.identity(wrapped_variant)
unwrapped_variant = gen_dataset_ops.unwrap_dataset_variant(
gpu_wrapped_variant)
variant_ds = dataset_ops._VariantDataset(unwrapped_variant,
ds._element_structure)
iterator = dataset_ops.make_initializable_iterator(variant_ds)
get_next = iterator.get_next()
with self.cached_session():
self.evaluate(iterator.initializer)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/wrap_unwrap_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("b/123903858: Add eager and V2 test coverage")
class MapDefunTest(test_base.DatasetTestBase):
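  # `map_defun.map_defun(fn, inputs, output_dtypes, output_shapes)` maps the
  # defun `fn` over dimension 0 of the input tensors, so every input must
  # share the same leading dimension.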
def testNoIntraOpLimit(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(
simple_fn, [elems], [dtypes.int32], [(2,)],
max_intra_op_parallelism=0)[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunSimple(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMismatchedTypes(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return math_ops.cast(x, dtypes.float64)
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunReduceDim(self):
# Tests where the output has a different rank from the input
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return array_ops.gather(x, 0)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
expected = constant_op.constant([1, 3, 5])
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMultipleOutputs(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return (x, math_ops.cast(x * 2 + 3, dtypes.float64))
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,),
(2,)])
expected = [elems, elems * 2 + 3]
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0]
self.assertEqual(result.get_shape(), (3, 2))
def testMapDefunPartialShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
elems = array_ops.placeholder(dtypes.int64, (None, 2))
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])
self.assertEqual(result[0].get_shape().as_list(), [None, 2])
def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec(None, dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32)
])
def fn(x, y):
return x, y
elems1 = array_ops.placeholder(dtypes.int32)
elems2 = array_ops.placeholder(dtypes.int32)
result = map_defun.map_defun(fn, [elems1, elems2],
[dtypes.int32, dtypes.int32], [(), ()])
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"All inputs must have the same dimension 0."):
sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]})
def testMapDefunRaisesDefunError(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
elems = constant_op.constant([0, 0, 0, 37, 0])
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
def testMapDefunCancelledCorrectly(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)])
def defun(x):
      # x has leading dimension 5, so gathering index 10 will raise an error
return array_ops.gather(x, 10)
c = array_ops.tile(
array_ops.expand_dims(
constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
[100, 1])
map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(map_defun_op)
def testMapDefunWithUnspecifiedOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
res = x * 2 + 3
return (res, res + 1, res + 2)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems],
[dtypes.int32, dtypes.int32, dtypes.int32],
[None, (None,), (2,)])
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected))
self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1))
self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2))
def testMapDefunWithDifferentOutputShapeEachRun(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
elems = array_ops.placeholder(dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [None])[0]
with session.Session() as sess:
self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3])
self.assertAllEqual(
sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]])
def testMapDefunWithWrongOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunWithInvalidInput(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2
c = constant_op.constant(2)
with self.assertRaises(ValueError):
# Fails at graph construction time for inputs with known shapes.
r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0]
p = array_ops.placeholder(dtypes.int32)
r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0]
with session.Session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(r, feed_dict={p: 0})
def testMapDefunWithParentCancellation(self):
# Checks that a cancellation of the parent graph is threaded through to
# MapDefunOp correctly.
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def simple_fn(x):
del x
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
# Blocking
return queue.dequeue_many(5)
c = constant_op.constant([1, 2, 3, 4, 5])
map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0]
with self.cached_session() as sess:
thread = self.checkedThread(
self.assert_op_cancelled, args=(map_defun_op,))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
def testMapDefunWithCapturedInputs(self):
c = constant_op.constant(2)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return x + c
x = constant_op.constant([1, 2, 3, 4])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0]
expected = x + c
self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op))
def testMapDefunWithVariantTensor(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.variant)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
def testMapDefunWithVariantTensorAsCaptured(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
del x
return serialized
x = constant_op.constant([0, 0])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
def testMapDefunWithStrTensor(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/map_defun_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.make_csv_dataset()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
import numpy as np
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MakeCsvDatasetTest(test_base.DatasetTestBase):
def _make_csv_dataset(self, filenames, batch_size, num_epochs=1, **kwargs):
return readers.make_csv_dataset(
filenames, batch_size=batch_size, num_epochs=num_epochs, **kwargs)
def _setup_files(self, inputs, linebreak="\n", compression_type=None):
filenames = []
for i, ip in enumerate(inputs):
fn = os.path.join(self.get_temp_dir(), "temp_%d.csv" % i)
contents = linebreak.join(ip).encode("utf-8")
if compression_type is None:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
filenames.append(fn)
return filenames
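  # Note on the helper above: GZIP files are written through gzip.GzipFile,
  # while ZLIB files are plain files holding a zlib.compress()-ed byte string;
  # both correspond to the `compression_type` values accepted by
  # `make_csv_dataset`.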
def _next_expected_batch(self, expected_output, expected_keys, batch_size,
num_epochs):
features = {k: [] for k in expected_keys}
for _ in range(num_epochs):
for values in expected_output:
for n, key in enumerate(expected_keys):
features[key].append(values[n])
if len(features[expected_keys[0]]) == batch_size:
yield features
features = {k: [] for k in expected_keys}
if features[expected_keys[0]]: # Leftover from the last batch
yield features
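  # Worked example for the generator above: with 3 expected rows, batch_size=2
  # and num_epochs=1, it yields one full batch of 2 rows followed by a leftover
  # batch of 1 row.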
def _verify_output(
self,
dataset,
batch_size,
num_epochs,
label_name,
expected_output,
expected_keys,
):
get_next = self.getNext(dataset)
for expected_features in self._next_expected_batch(
expected_output,
expected_keys,
batch_size,
num_epochs,
):
actual_features = self.evaluate(get_next())
if label_name is not None:
expected_labels = expected_features.pop(label_name)
self.assertAllEqual(expected_labels, actual_features[1])
actual_features = actual_features[0]
for k in expected_features.keys():
# Compare features
self.assertAllEqual(expected_features[k], actual_features[k])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def _test_dataset(self,
inputs,
expected_output,
expected_keys,
batch_size=1,
num_epochs=1,
label_name=None,
**kwargs):
"""Checks that elements produced by CsvDataset match expected output."""
# Convert str type because py3 tf strings are bytestrings
filenames = self._setup_files(
inputs, compression_type=kwargs.get("compression_type", None))
dataset = self._make_csv_dataset(
filenames,
batch_size=batch_size,
num_epochs=num_epochs,
label_name=label_name,
**kwargs)
self._verify_output(dataset, batch_size, num_epochs, label_name,
expected_output, expected_keys)
def testMakeCSVDataset(self):
"""Tests making a CSV dataset with keys and defaults provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withBatchSizeAndEpochs(self):
"""Tests making a CSV dataset with keys and defaults provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=3,
num_epochs=10,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withCompressionType(self):
"""Tests `compression_type` argument."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
for compression_type in ("GZIP", "ZLIB"):
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
compression_type=compression_type,
)
def testMakeCSVDataset_withBadInputs(self):
"""Tests that exception is raised when input is malformed.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
# Duplicate column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
label_name="col0",
column_names=column_names * 2)
# Label key not one of column names
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
label_name="not_a_real_label",
column_names=column_names)
def testMakeCSVDataset_withNoLabel(self):
"""Tests making a CSV dataset with no label provided."""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withNoHeader(self):
"""Tests that datasets can be created from CSV files with no header line.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [["0,1,2,3,4", "5,6,7,8,9"], ["10,11,12,13,14", "15,16,17,18,19"]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=False,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withTypes(self):
"""Tests that defaults can be a dtype instead of a Tensor for required vals.
"""
record_defaults = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.string
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x[0] for x in column_names), "0,1,2,3,4", "5,6,7,8,9"],
[
",".join(x[0] for x in column_names), "10,11,12,13,14",
"15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withNoColNames(self):
"""Tests that datasets can be created when column names are not specified.
In that case, we should infer the column names from the header lines.
"""
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
expected_output = [[0, 1, 2, 3, b"4"], [5, 6, 7, 8, b"9"],
[10, 11, 12, 13, b"14"], [15, 16, 17, 18, b"19"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
column_defaults=record_defaults,
)
def testMakeCSVDataset_withTypeInferenceMismatch(self):
    # Tests that an error is raised when the number of column names does not
    # match the number of fields in the records.
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
with self.assertRaises(ValueError):
self._make_csv_dataset(
filenames,
column_names=column_names + ["extra_name"],
column_defaults=None,
batch_size=2,
num_epochs=10)
def testMakeCSVDataset_withTypeInference(self):
"""Tests that datasets can be created when no defaults are specified.
In that case, we should infer the types from the first N records.
"""
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
)
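  # A minimal usage sketch of the inference path exercised above (argument
  # values are illustrative):
  #   dataset = readers.make_csv_dataset(
  #       filenames, batch_size=1, num_epochs=1, shuffle=False)
  # Without `column_defaults`, dtypes are inferred from the file contents, so
  # the 2**33 value above is read as int64 rather than int32.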
def testMakeCSVDataset_withTypeInferenceFallthrough(self):
"""Tests that datasets can be created when no defaults are specified.
Tests on a deliberately tricky file.
"""
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
",,,,",
"0,0,0.0,0.0,0.0",
"0,%s,2.0,3e50,rabbit" % str_int32_max,
",,,,",
]]
expected_output = [[0, 0, 0, 0, b""], [0, 0, 0, 0, b"0.0"],
[0, 2**33, 2.0, 3e50, b"rabbit"], [0, 0, 0, 0, b""]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
)
def testMakeCSVDataset_withNAValuesAndFieldDelim(self):
"""Tests that datasets can be created from different delim and na_value."""
column_names = ["col%d" % i for i in range(5)]
inputs = [["0 1 2 3 4", "5 6 7 8 9"], ["10 11 12 13 14", "15 16 17 ? 19"]]
expected_output = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 0, 19]]
label = "col0"
self._test_dataset(
inputs,
expected_output=expected_output,
expected_keys=column_names,
column_names=column_names,
label_name=label,
batch_size=1,
num_epochs=1,
shuffle=False,
header=False,
na_value="?",
field_delim=" ",
)
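  # In the test above, na_value="?" causes the "?" field in the last record to
  # be replaced by the column's default value (0 for the inferred numeric
  # column), which is why expected_output contains [15, 16, 17, 0, 19].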
def testMakeCSVDataset_withSelectCols(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
expected_output = [[0, 2**33, 2.0, 3e50, b"rabbit"]]
select_cols = [1, 3, 4]
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
column_defaults=[record_defaults[i] for i in select_cols],
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can still do inference without provided defaults
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can still do column name inference
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=select_cols,
)
# Can specify column names instead of indices
self._test_dataset(
inputs,
expected_output=[[x[i] for i in select_cols] for x in expected_output],
expected_keys=[column_names[i] for i in select_cols],
column_names=column_names,
batch_size=1,
num_epochs=1,
shuffle=False,
header=True,
select_columns=[column_names[i] for i in select_cols],
)
def testMakeCSVDataset_withSelectColsError(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
column_names = ["col%d" % i for i in range(5)]
str_int32_max = str(2**33)
inputs = [[
",".join(x for x in column_names),
"0,%s,2.0,3e50,rabbit" % str_int32_max
]]
select_cols = [1, 3, 4]
filenames = self._setup_files(inputs)
with self.assertRaises(ValueError):
# Mismatch in number of defaults and number of columns selected,
# should raise an error
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=record_defaults,
column_names=column_names,
select_columns=select_cols)
with self.assertRaises(ValueError):
# Invalid column name should raise an error
self._make_csv_dataset(
filenames,
batch_size=1,
column_defaults=[[0]],
column_names=column_names,
label_name=None,
select_columns=["invalid_col_name"])
def testMakeCSVDataset_withShuffle(self):
record_defaults = [
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.int64),
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.float64),
constant_op.constant([], dtypes.string)
]
def str_series(st):
return ",".join(str(i) for i in range(st, st + 5))
column_names = ["col%d" % i for i in range(5)]
inputs = [
[",".join(x for x in column_names)
] + [str_series(5 * i) for i in range(15)],
[",".join(x for x in column_names)] +
[str_series(5 * i) for i in range(15, 20)],
]
filenames = self._setup_files(inputs)
total_records = 20
for batch_size in [1, 2]:
# Test that shuffling with the same seed produces the same result
dataset1 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
dataset2 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
for _ in range(total_records // batch_size):
batch1 = nest.flatten(self.evaluate(next1()))
batch2 = nest.flatten(self.evaluate(next2()))
for i in range(len(batch1)):
self.assertAllEqual(batch1[i], batch2[i])
# Test that shuffling with a different seed produces different results
dataset1 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=5,
num_epochs=2,
)
dataset2 = self._make_csv_dataset(
filenames,
column_defaults=record_defaults,
column_names=column_names,
batch_size=batch_size,
header=True,
shuffle=True,
shuffle_seed=6,
num_epochs=2,
)
next1 = self.getNext(dataset1)
next2 = self.getNext(dataset2)
      all_equal = True
for _ in range(total_records // batch_size):
batch1 = nest.flatten(self.evaluate(next1()))
batch2 = nest.flatten(self.evaluate(next2()))
for i in range(len(batch1)):
all_equal = all_equal and np.array_equal(batch1[i], batch2[i])
self.assertFalse(all_equal)
def testIndefiniteRepeatShapeInference(self):
column_names = ["col%d" % i for i in range(5)]
inputs = [[",".join(x for x in column_names), "0,1,2,3,4", "5,6,7,8,9"], [
",".join(x for x in column_names), "10,11,12,13,14", "15,16,17,18,19"
]]
filenames = self._setup_files(inputs)
dataset = self._make_csv_dataset(filenames, batch_size=32, num_epochs=None)
for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)):
self.assertEqual(32, shape[0])
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/make_csv_dataset_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `SnapshotDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SnapshotDatasetTest(reader_dataset_ops_test_base.TFRecordDatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(SnapshotDatasetTest, self).setUp()
self.removeTFRecords()
def removeTFRecords(self):
for filename in self.test_filenames:
os.remove(filename)
self.test_filenames = []
def setUpTFRecord(self):
self._num_files = 10
self._num_records = 10
self.test_filenames = self._createFiles()
def makeSnapshotDirectory(self):
tmpdir = self.get_temp_dir()
tmpdir = os.path.join(tmpdir, "snapshot")
os.mkdir(tmpdir)
return tmpdir
def assertSnapshotDirectoryContains(
self, directory, num_fingerprints, num_runs_per_fp, num_snapshot_files):
dirlist = os.listdir(directory)
self.assertLen(dirlist, num_fingerprints)
for i in range(num_fingerprints):
fingerprint_dir = os.path.join(directory, dirlist[i])
fingerprint_dir_list = sorted(os.listdir(fingerprint_dir))
self.assertLen(fingerprint_dir_list, num_runs_per_fp + 1)
self.assertEqual(fingerprint_dir_list[num_runs_per_fp],
"snapshot.metadata")
for j in range(num_runs_per_fp):
run_dir = os.path.join(fingerprint_dir, fingerprint_dir_list[j])
run_dirlist = sorted(os.listdir(run_dir))
self.assertLen(run_dirlist, num_snapshot_files)
file_counter = 0
for filename in run_dirlist:
self.assertEqual(filename, "%08d.snapshot" % file_counter)
file_counter += 1
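  # Illustrative sketch of the on-disk layout the assertion above expects
  # (fingerprint and run names are made up):
  #   <snapshot_dir>/
  #     <fingerprint>/
  #       <run_id>/
  #         00000000.snapshot
  #         00000001.snapshot
  #       snapshot.metadata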
def testWriteDifferentPipelinesInOneDirectory(self):
tmpdir = self.makeSnapshotDirectory()
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1000)))
dataset = dataset_ops.Dataset.range(1001)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(1001)))
self.assertSnapshotDirectoryContains(tmpdir, 2, 1, 1)
def testWriteSnapshotMultipleSimultaneous(self):
tmpdir = self.makeSnapshotDirectory()
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(snapshot.snapshot(tmpdir))
next1 = self.getNext(dataset1)
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(snapshot.snapshot(tmpdir))
next2 = self.getNext(dataset2)
for _ in range(1000):
self.evaluate(next1())
self.evaluate(next2())
    # Check that only one copy of the metadata has been written; the pipeline
    # that lost the race falls back to passthrough mode.
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@parameterized.parameters(snapshot.COMPRESSION_NONE,
snapshot.COMPRESSION_GZIP)
def testWriteSnapshotSimpleSuccessful(self, compression):
tmpdir = self.makeSnapshotDirectory()
dataset = dataset_ops.Dataset.range(1000)
dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, list(range(1000)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
def testWriteSnapshotRepeatAfterwards(self):
tmpdir = self.makeSnapshotDirectory()
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
dataset = dataset.repeat(10)
self.assertDatasetProduces(dataset, list(range(10)) * 10)
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
@parameterized.parameters(snapshot.COMPRESSION_NONE,
snapshot.COMPRESSION_GZIP)
def testReadSnapshotBackAfterWrite(self, compression):
self.setUpTFRecord()
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.makeSnapshotDirectory()
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.snapshot(tmpdir, compression=compression))
self.assertDatasetProduces(dataset, expected)
    # Remove the original files and try to read the data back only from snapshot.
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.snapshot(
tmpdir, compression=compression))
self.assertDatasetProduces(dataset2, expected)
def testSameFingerprintWithDifferentInitializationOrder(self):
tmpdir = self.makeSnapshotDirectory()
dataset1 = dataset_ops.Dataset.range(0, 100)
dataset2 = dataset_ops.Dataset.range(100, 200)
dataset3 = dataset_ops.Dataset.range(200, 300)
dataset = dataset1.concatenate(dataset2).concatenate(dataset3)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
dataset4 = dataset_ops.Dataset.range(200, 300)
dataset5 = dataset_ops.Dataset.range(100, 200)
dataset6 = dataset_ops.Dataset.range(0, 100)
dataset = dataset6.concatenate(dataset5).concatenate(dataset4)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset, list(range(300)))
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
def testExpiredSnapshotRewrite(self):
tmpdir = self.makeSnapshotDirectory()
dataset1 = dataset_ops.Dataset.range(1000)
dataset1 = dataset1.apply(
snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next1 = self.getNext(dataset1)
# Don't finish reading dataset1, so it is never finalized
for _ in range(500):
self.evaluate(next1())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 1)
time.sleep(2)
    # Create dataset2 only after running through dataset1 because, in eager
    # mode, the snapshot state is determined immediately upon dataset creation.
    # We want the snapshot state for dataset2 to be determined only after the
    # first snapshot has expired.
dataset2 = dataset_ops.Dataset.range(1000)
dataset2 = dataset2.apply(
snapshot.snapshot(tmpdir, pending_snapshot_expiry_seconds=1))
next2 = self.getNext(dataset2)
for _ in range(500):
self.evaluate(next2())
self.assertSnapshotDirectoryContains(tmpdir, 1, 2, 1)
def testSpecifyShardSize(self):
tmpdir = self.makeSnapshotDirectory()
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(lambda x: gen_array_ops.broadcast_to(x, [1024, 1024]))
dataset = dataset.repeat(10)
dataset = dataset.apply(
snapshot.snapshot(tmpdir, shard_size_bytes=10 * 1024 * 1024))
next_fn = self.getNext(dataset)
for _ in range(10):
self.evaluate(next_fn())
self.assertSnapshotDirectoryContains(tmpdir, 1, 1, 4)
def testAdditionalOperationsAfterReadBack(self):
self.setUpTFRecord()
filenames = self.test_filenames
expected = [
b"Record %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
tmpdir = self.makeSnapshotDirectory()
dataset = core_readers._TFRecordDataset(filenames)
dataset = dataset.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset, expected)
    # Remove the original files and try to read the data back only from snapshot.
self.removeTFRecords()
dataset2 = core_readers._TFRecordDataset(filenames)
dataset2 = dataset2.apply(snapshot.snapshot(tmpdir))
self.assertDatasetProduces(dataset2, expected)
expected_after = [
b"cord %d of file %d" % (r, f) # pylint:disable=g-complex-comprehension
for f in range(0, 10)
for r in range(0, 10)
]
dataset3 = core_readers._TFRecordDataset(filenames)
dataset3 = dataset3.apply(snapshot.snapshot(tmpdir))
dataset3 = dataset3.map(lambda x: string_ops.substr_v2(x, 2, 1000))
self.assertDatasetProduces(dataset3, expected_after)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/snapshot_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `override_threadpool()` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class OverrideThreadpoolTest(test_base.DatasetTestBase,
parameterized.TestCase):
def _testNumThreadsHelper(self, num_threads, override_threadpool_fn):
def get_thread_id(_):
# Python creates a dummy thread object to represent the current
# thread when called from an "alien" thread (such as a
# `PrivateThreadPool` thread in this case). It does not include
# the TensorFlow-given display name, but it has a unique
# identifier that maps one-to-one with the underlying OS thread.
return np.array(threading.current_thread().ident).astype(np.int64)
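    # Because the py_func below runs get_thread_id on the dataset's private
    # threadpool, the number of distinct thread ids observed can never exceed
    # the configured pool size, which is what this helper asserts.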
dataset = (
dataset_ops.Dataset.range(1000).map(
lambda x: script_ops.py_func(get_thread_id, [x], dtypes.int64),
num_parallel_calls=32).apply(unique.unique()))
dataset = override_threadpool_fn(dataset)
next_element = self.getNext(dataset, requires_initialization=True)
thread_ids = []
try:
while True:
thread_ids.append(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
self.assertLen(thread_ids, len(set(thread_ids)))
self.assertNotEmpty(thread_ids)
if num_threads:
# NOTE(mrry): We don't control the thread pool scheduling, and
# so cannot guarantee that all of the threads in the pool will
# perform work.
self.assertLessEqual(len(thread_ids), num_threads)
@parameterized.named_parameters(
("1", 1, None),
("2", 2, None),
("3", 4, None),
("4", 8, None),
("5", 16, None),
("6", 4, -1),
("7", 4, 0),
("8", 4, 1),
("9", 4, 4),
)
def testNumThreadsDeprecated(self, num_threads, max_intra_op_parallelism):
def override_threadpool_fn(dataset):
return threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
num_threads,
max_intra_op_parallelism=max_intra_op_parallelism,
display_name="private_thread_pool_%d" % num_threads))
self._testNumThreadsHelper(num_threads, override_threadpool_fn)
@parameterized.named_parameters(
("1", 1, None),
("2", 2, None),
("3", 4, None),
("4", 8, None),
("5", 16, None),
("6", None, 0),
("7", None, 1),
("8", None, 4),
("9", 4, 0),
("10", 4, 1),
("11", 4, 4),
("12", None, None),
)
def testNumThreads(self, num_threads, max_intra_op_parallelism):
def override_threadpool_fn(dataset):
t_options = threading_options.ThreadingOptions()
if max_intra_op_parallelism is not None:
t_options.max_intra_op_parallelism = max_intra_op_parallelism
if num_threads is not None:
t_options.private_threadpool_size = num_threads
options = dataset_ops.Options()
options.experimental_threading = t_options
return dataset.with_options(options)
self._testNumThreadsHelper(num_threads, override_threadpool_fn)
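  # Minimal usage sketch of the options-based API exercised above (`dataset`
  # is any tf.data dataset; the values shown are illustrative):
  #   options = dataset_ops.Options()
  #   options.experimental_threading = threading_options.ThreadingOptions()
  #   options.experimental_threading.private_threadpool_size = 4
  #   options.experimental_threading.max_intra_op_parallelism = 1
  #   dataset = dataset.with_options(options)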
def testMaxIntraOpParallelismAsGraphDefInternal(self):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset_ops._MaxIntraOpParallelismDataset(dataset, 1)
graph = graph_pb2.GraphDef().FromString(
self.evaluate(dataset._as_serialized_graph()))
    self.assertTrue(
        any(node.op == "MaxIntraOpParallelismDataset" for node in graph.node))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/override_threadpool_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.{from,to}_variant()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class VariantTest(test_base.DatasetTestBase):
def testRoundtripRange(self):
dataset = dataset_ops.Dataset.range(10)
variant = dataset_ops.to_variant(dataset)
dataset = dataset_ops.from_variant(variant,
dataset_ops.get_structure(dataset))
self.assertDatasetProduces(dataset, range(10))
self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)
def testRoundtripMap(self):
dataset = dataset_ops.Dataset.range(10).map(lambda x: x*x)
variant = dataset_ops.to_variant(dataset)
dataset = dataset_ops.from_variant(variant,
dataset_ops.get_structure(dataset))
self.assertDatasetProduces(dataset, [x * x for x in range(10)])
self.assertEqual(self.evaluate(cardinality.cardinality(dataset)), 10)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/variant_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class DirectedInterleaveDatasetTest(test_base.DatasetTestBase):
def testBasic(self):
selector_dataset = dataset_ops.Dataset.range(10).repeat(100)
input_datasets = [
dataset_ops.Dataset.from_tensors(i).repeat(100) for i in range(10)
]
dataset = interleave_ops._DirectedInterleaveDataset(selector_dataset,
input_datasets)
next_element = self.getNext(dataset)
for _ in range(100):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _normalize(self, vec):
return vec / vec.sum()
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
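  # Worked example of the statistic above: for expected = [0.5, 0.5] and
  # actual = [0.6, 0.4], diff = [0.1, -0.1] and
  # chi2 = 0.1**2 / 0.5 + 0.1**2 / 0.5 = 0.04.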
def _testSampleFromDatasetsHelper(self, weights, num_datasets, num_samples):
# Create a dataset that samples each integer in `[0, num_datasets)`
# with probability given by `weights[i]`.
dataset = interleave_ops.sample_from_datasets([
dataset_ops.Dataset.from_tensors(i).repeat(None)
for i in range(num_datasets)
], weights)
dataset = dataset.take(num_samples)
next_element = self.getNext(dataset)
freqs = np.zeros([num_datasets])
for _ in range(num_samples):
freqs[self.evaluate(next_element())] += 1
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
return freqs
def testSampleFromDatasets(self):
random_seed.set_random_seed(1619)
num_samples = 5000
rand_probs = self._normalize(np.random.random_sample((15,)))
# Use chi-squared test to assert that the observed distribution matches the
# expected distribution. Based on the implementation in
# "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py".
for probs in [[.85, .05, .1], rand_probs, [1.]]:
probs = np.asarray(probs)
classes = len(probs)
freqs = self._testSampleFromDatasetsHelper(probs, classes, num_samples)
self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)
# Also check that `weights` as a dataset samples correctly.
probs_ds = dataset_ops.Dataset.from_tensors(probs).repeat()
freqs = self._testSampleFromDatasetsHelper(probs_ds, classes, num_samples)
self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)
def testSelectFromDatasets(self):
words = [b"foo", b"bar", b"baz"]
datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = interleave_ops.choose_from_datasets(datasets, choice_dataset)
next_element = self.getNext(dataset)
for i in choice_array:
self.assertEqual(words[i], self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrors(self):
with self.assertRaisesRegexp(ValueError,
r"vector of length `len\(datasets\)`"):
interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[0.25, 0.25, 0.25, 0.25])
with self.assertRaisesRegexp(TypeError, "`tf.float32` or `tf.float64`"):
interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[1, 1])
with self.assertRaisesRegexp(TypeError, "must have the same type"):
interleave_ops.sample_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(0.0)
])
with self.assertRaisesRegexp(TypeError, "tf.int64"):
interleave_ops.choose_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegexp(TypeError, "scalar"):
interleave_ops.choose_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
], choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/directed_interleave_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.cardinality()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class NumElementsTest(test_base.DatasetTestBase, parameterized.TestCase):
"""Tests for `tf.data.experimental.cardinality()`."""
@parameterized.named_parameters(
# pylint: disable=g-long-lambda
("Batch1",
lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=True), 2),
("Batch2",
lambda: dataset_ops.Dataset.range(5).batch(2, drop_remainder=False), 3),
("Batch3",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).batch(2),
cardinality.UNKNOWN),
("Batch4", lambda: dataset_ops.Dataset.range(5).repeat().batch(2),
cardinality.INFINITE),
("Cache1", lambda: dataset_ops.Dataset.range(5).cache(), 5),
("Cache2", lambda: dataset_ops.Dataset.range(5).cache("foo"), 5),
("Concatenate1", lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5)), 10),
("Concatenate2",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate(
dataset_ops.Dataset.range(5)), cardinality.UNKNOWN),
("Concatenate3", lambda: dataset_ops.Dataset.range(5).repeat().
concatenate(dataset_ops.Dataset.range(5)), cardinality.INFINITE),
("Concatenate4", lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5).filter(lambda _: True)),
cardinality.UNKNOWN),
("Concatenate5",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate(
dataset_ops.Dataset.range(5).filter(lambda _: True)),
cardinality.UNKNOWN),
("Concatenate6", lambda: dataset_ops.Dataset.range(5).repeat().
concatenate(dataset_ops.Dataset.range(5).filter(lambda _: True)),
cardinality.INFINITE),
("Concatenate7", lambda: dataset_ops.Dataset.range(5).concatenate(
dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),
("Concatenate8",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).concatenate(
dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),
("Concatenate9",
lambda: dataset_ops.Dataset.range(5).repeat().concatenate(
dataset_ops.Dataset.range(5).repeat()), cardinality.INFINITE),
("FlatMap", lambda: dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensors(0)), cardinality.UNKNOWN),
("Filter", lambda: dataset_ops.Dataset.range(5).filter(lambda _: True),
cardinality.UNKNOWN),
("FromTensors1", lambda: dataset_ops.Dataset.from_tensors(0), 1),
("FromTensors2", lambda: dataset_ops.Dataset.from_tensors((0, 1)), 1),
("FromTensorSlices1",
lambda: dataset_ops.Dataset.from_tensor_slices([0, 0, 0]), 3),
("FromTensorSlices2",
lambda: dataset_ops.Dataset.from_tensor_slices(([0, 0, 0], [1, 1, 1])),
3),
("Interleave1", lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0), cycle_length=1),
cardinality.UNKNOWN),
("Interleave2", lambda: dataset_ops.Dataset.range(5).interleave(
lambda _: dataset_ops.Dataset.from_tensors(0),
cycle_length=1,
num_parallel_calls=1), cardinality.UNKNOWN),
("Map1", lambda: dataset_ops.Dataset.range(5).map(lambda x: x), 5),
("Map2", lambda: dataset_ops.Dataset.range(5).map(
lambda x: x, num_parallel_calls=1), 5),
("PaddedBatch1", lambda: dataset_ops.Dataset.range(5).padded_batch(
2, [], drop_remainder=True), 2),
("PaddedBatch2", lambda: dataset_ops.Dataset.range(5).padded_batch(
2, [], drop_remainder=False), 3),
("PaddedBatch3", lambda: dataset_ops.Dataset.range(5).filter(
lambda _: True).padded_batch(2, []), cardinality.UNKNOWN),
("PaddedBatch4",
lambda: dataset_ops.Dataset.range(5).repeat().padded_batch(2, []),
cardinality.INFINITE),
("Prefetch", lambda: dataset_ops.Dataset.range(5).prefetch(buffer_size=1),
5),
("Range1", lambda: dataset_ops.Dataset.range(0), 0),
("Range2", lambda: dataset_ops.Dataset.range(5), 5),
("Range3", lambda: dataset_ops.Dataset.range(5, 10), 5),
("Range4", lambda: dataset_ops.Dataset.range(10, 5), 0),
("Range5", lambda: dataset_ops.Dataset.range(5, 10, 2), 3),
("Range6", lambda: dataset_ops.Dataset.range(10, 5, -2), 3),
("Repeat1", lambda: dataset_ops.Dataset.range(0).repeat(0), 0),
("Repeat2", lambda: dataset_ops.Dataset.range(1).repeat(0), 0),
("Repeat3", lambda: dataset_ops.Dataset.range(0).repeat(5), 0),
("Repeat4", lambda: dataset_ops.Dataset.range(1).repeat(5), 5),
("Repeat5", lambda: dataset_ops.Dataset.range(0).repeat(), 0),
("Repeat6", lambda: dataset_ops.Dataset.range(1).repeat(),
cardinality.INFINITE),
("Shuffle", lambda: dataset_ops.Dataset.range(5).shuffle(buffer_size=1),
5),
("Shard1", lambda: dataset_ops.Dataset.range(5).shard(2, 0), 3),
("Shard2", lambda: dataset_ops.Dataset.range(5).shard(8, 7), 0),
("Shard3",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).shard(2, 0),
cardinality.UNKNOWN),
("Shard4", lambda: dataset_ops.Dataset.range(5).repeat().shard(2, 0),
cardinality.INFINITE),
("Skip1", lambda: dataset_ops.Dataset.range(5).skip(2), 3),
("Skip2", lambda: dataset_ops.Dataset.range(5).skip(8), 0),
("Skip3",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).skip(2),
cardinality.UNKNOWN),
("Skip4", lambda: dataset_ops.Dataset.range(5).repeat().skip(2),
cardinality.INFINITE),
("Take1", lambda: dataset_ops.Dataset.range(5).take(2), 2),
("Take2", lambda: dataset_ops.Dataset.range(5).take(8), 5),
("Take3",
lambda: dataset_ops.Dataset.range(5).filter(lambda _: True).take(2),
cardinality.UNKNOWN),
("Take4", lambda: dataset_ops.Dataset.range(5).repeat().take(2), 2),
("Window1", lambda: dataset_ops.Dataset.range(5).window(
size=2, shift=2, drop_remainder=True), 2),
("Window2", lambda: dataset_ops.Dataset.range(5).window(
size=2, shift=2, drop_remainder=False), 3),
("Zip1", lambda: dataset_ops.Dataset.zip(dataset_ops.Dataset.range(5)),
5),
("Zip2", lambda: dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(5), dataset_ops.Dataset.range(3))), 3),
("Zip3", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(
5), dataset_ops.Dataset.range(3).repeat())), 5),
("Zip4", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(
5).repeat(), dataset_ops.Dataset.range(3).repeat())),
cardinality.INFINITE),
("Zip5", lambda: dataset_ops.Dataset.zip((dataset_ops.Dataset.range(
5), dataset_ops.Dataset.range(3).filter(lambda _: True))),
cardinality.UNKNOWN),
# pylint: enable=g-long-lambda
)
def testNumElements(self, dataset_fn, expected_result):
with self.cached_session() as sess:
self.assertEqual(
sess.run(cardinality.cardinality(dataset_fn())), expected_result)
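  # Usage sketch of the API under test (the pipeline below is hypothetical):
  #   ds = dataset_ops.Dataset.range(100).batch(32, drop_remainder=False)
  #   self.evaluate(cardinality.cardinality(ds))  # == 4, i.e. ceil(100 / 32)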
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/cardinality_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.group_by_reducer()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class GroupByReducerTest(test_base.DatasetTestBase):
def testSum(self):
reducer = grouping.Reducer(
init_func=lambda _: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.scalar(),
expected_output=[(i - 1) * i, i * i])
def testAverage(self):
def reduce_fn(x, y):
return (x[0] * x[1] + math_ops.cast(y, dtypes.float32)) / (
x[1] + 1), x[1] + 1
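    # The state is (running_mean, count); folding in a new value y gives
    # new_mean = (running_mean * count + y) / (count + 1).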
reducer = grouping.Reducer(
init_func=lambda _: (0.0, 0.0),
reduce_func=reduce_fn,
finalize_func=lambda x, _: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).apply(
grouping.group_by_reducer(
lambda x: math_ops.cast(x, dtypes.int64) % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.scalar(),
expected_output=[i - 1, i])
def testConcat(self):
components = np.array(list("abcdefghijklmnopqrst")).view(np.chararray)
reducer = grouping.Reducer(
init_func=lambda x: "",
reduce_func=lambda x, y: x + y[0],
finalize_func=lambda x: x)
for i in range(1, 11):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.from_tensor_slices(components),
dataset_ops.Dataset.range(2 * i))).apply(
grouping.group_by_reducer(lambda x, y: y % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.scalar(),
expected_output=[b"acegikmoqs" [:i], b"bdfhjlnprt" [:i]])
def testSparseSum(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1], dtype=np.int64)),
dense_shape=np.array([1, 1]))
reducer = grouping.Reducer(
init_func=lambda _: _sparse(np.int64(0)),
reduce_func=lambda x, y: _sparse(x.values[0] + y.values[0]),
finalize_func=lambda x: x.values[0])
for i in range(1, 11):
dataset = dataset_ops.Dataset.range(2 * i).map(_sparse).apply(
grouping.group_by_reducer(lambda x: x.values[0] % 2, reducer))
self.assertDatasetProduces(
dataset,
expected_shapes=tensor_shape.scalar(),
expected_output=[(i - 1) * i, i * i])
def testChangingStateShape(self):
def reduce_fn(x, _):
# Statically known rank, but dynamic length.
larger_dim = array_ops.concat([x[0], x[0]], 0)
# Statically unknown rank.
larger_rank = array_ops.expand_dims(x[1], 0)
return larger_dim, larger_rank
reducer = grouping.Reducer(
init_func=lambda x: ([0], 1),
reduce_func=reduce_fn,
finalize_func=lambda x, y: (x, y))
for i in range(1, 11):
dataset = dataset_ops.Dataset.from_tensors(np.int64(0)).repeat(i).apply(
grouping.group_by_reducer(lambda x: x, reducer))
dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
self.assertEqual([None], dataset_output_shapes[0].as_list())
self.assertIs(None, dataset_output_shapes[1].ndims)
get_next = self.getNext(dataset)
x, y = self.evaluate(get_next())
self.assertAllEqual([0] * (2**i), x)
self.assertAllEqual(np.array(1, ndmin=i), y)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testTypeMismatch(self):
reducer = grouping.Reducer(
init_func=lambda x: constant_op.constant(1, dtype=dtypes.int32),
reduce_func=lambda x, y: constant_op.constant(1, dtype=dtypes.int64),
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegexp(
TypeError,
"The element types for the new state must match the initial state."):
dataset.apply(
grouping.group_by_reducer(lambda _: np.int64(0), reducer))
# TODO(b/78665031): Remove once non-scalar keys are supported.
def testInvalidKeyShape(self):
reducer = grouping.Reducer(
init_func=lambda x: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegexp(
ValueError, "`key_func` must return a single tf.int64 tensor."):
dataset.apply(
grouping.group_by_reducer(lambda _: np.int64((0, 0)), reducer))
# TODO(b/78665031): Remove once non-int64 keys are supported.
def testInvalidKeyType(self):
reducer = grouping.Reducer(
init_func=lambda x: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegexp(
ValueError, "`key_func` must return a single tf.int64 tensor."):
dataset.apply(
grouping.group_by_reducer(lambda _: "wrong", reducer))
def testTuple(self):
def init_fn(_):
return np.array([], dtype=np.int64), np.int64(0)
def reduce_fn(state, value):
s1, s2 = state
v1, v2 = value
return array_ops.concat([s1, [v1]], 0), s2 + v2
def finalize_fn(s1, s2):
return s1, s2
reducer = grouping.Reducer(init_fn, reduce_fn, finalize_fn)
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10), dataset_ops.Dataset.range(10))).apply(
grouping.group_by_reducer(lambda x, y: np.int64(0), reducer))
get_next = self.getNext(dataset)
x, y = self.evaluate(get_next())
self.assertAllEqual(x, np.asarray([x for x in range(10)]))
self.assertEqual(y, 45)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MapAndBatchTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("Default", None, None),
("SequentialCalls", 1, None),
("ParallelCalls", 2, None),
("ParallelBatches", None, 10),
)
def testMapAndBatch(self, num_parallel_calls, num_parallel_batches):
"""Test a dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset ->
# RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def dataset_fn(batch_size, count):
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
count).apply(
batching.map_and_batch(
map_func=_map_fn,
batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
num_parallel_batches=num_parallel_batches))
return dataset
# Batch of a finite input, where the batch_size divides the
# total number of elements.
dataset = dataset_fn(14, 28)
get_next = self.getNext(dataset)
self.assertEqual(
[[None] + list(c.shape[1:]) for c in components],
[shape.as_list()
for shape in dataset_ops.get_legacy_output_shapes(dataset)])
num_batches = (28 * 7) // 14
for i in range(num_batches):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(14):
self.assertAllEqual(component[(i * 14 + j) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Batch of a finite input, where the batch_size does not
# divide the total number of elements.
get_next = self.getNext(dataset_fn(8, 14))
# We expect (num_batches - 1) full-sized batches.
num_batches = int(math.ceil((14 * 7) / 8))
for i in range(num_batches - 1):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range(8):
self.assertAllEqual(component[(i * 8 + j) % 7]**2,
result_component[j])
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
for j in range((14 * 7) % 8):
self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
    # Batching an empty input should produce no elements (i.e. the iterator
    # raises OutOfRangeError immediately).
self.assertDatasetProduces(dataset_fn(8, 0), expected_output=[])
    # A batch size of zero should raise an error at initialization time.
with self.assertRaises(errors.InvalidArgumentError):
self.assertDatasetProduces(dataset_fn(0, 14), expected_output=[])
@parameterized.named_parameters(
("Even", False),
("Uneven", True),
)
def testMapAndBatchPartialBatch(self, drop_remainder):
dataset = (
dataset_ops.Dataset.range(10).apply(
batching.map_and_batch(
lambda x: array_ops.reshape(x * x, [1]),
batch_size=4,
drop_remainder=drop_remainder)))
if drop_remainder:
self.assertEqual(
[4, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
else:
self.assertEqual(
[None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]]]
if not drop_remainder:
expected_output.append([[64], [81]])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testMapAndBatchYieldsPartialBatch(self):
dataset = (
dataset_ops.Dataset.range(10).apply(
batching.map_and_batch(lambda x: array_ops.reshape(x * x, [1]), 4)))
self.assertEqual(
[None, 1], dataset_ops.get_legacy_output_shapes(dataset).as_list())
expected_output = [[[0], [1], [4], [9]], [[16], [25], [36], [49]],
[[64], [81]]]
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testMapAndBatchParallelGetNext(self):
dataset = dataset_ops.Dataset.range(50000).apply(
batching.map_and_batch(lambda x: x, batch_size=100))
if context.executing_eagerly():
iterator = iter(dataset)
get_next = iterator._next_internal # pylint: disable=protected-access
else:
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next
elements = []
for _ in range(100):
elements.append(get_next)
for i in range(5):
got = self.evaluate([element() for element in elements])
got.sort(key=lambda x: x[0])
expected = []
for j in range(100):
expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))
self.assertAllEqual(got, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate([element() for element in elements])
def testMapAndBatchParallelGetNextDropRemainder(self):
dataset = dataset_ops.Dataset.range(49999).apply(
batching.map_and_batch(
lambda x: x, batch_size=100, drop_remainder=True))
if context.executing_eagerly():
iterator = iter(dataset)
get_next = iterator._next_internal # pylint: disable=protected-access
else:
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next
elements = []
for _ in range(100):
elements.append(get_next)
for i in range(4):
got = self.evaluate([element() for element in elements])
got.sort(key=lambda x: x[0])
expected = []
for j in range(100):
expected.append(range(i * 10000 + j * 100, i * 10000 + (j + 1) * 100))
self.assertAllEqual(got, expected)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate([element() for element in elements])
def testMapAndBatchSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
dataset = dataset_ops.Dataset.range(10).apply(
batching.map_and_batch(_sparse, 5))
self.assertDatasetProduces(
dataset,
expected_output=[
sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
dense_shape=[5, 1]) for i in range(2)
])
def testMapAndBatchFails(self):
"""Test a dataset that maps a TF function across its input elements."""
with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
dataset = dataset_ops.Dataset.from_tensors(
array_ops.check_numerics(
constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
dataset = dataset.apply(batching.map_and_batch(lambda x: x, 14))
get_next = self.getNext(dataset)
self.evaluate(get_next())
def testMapAndBatchShapeMismatch(self):
"""Test a dataset that maps a TF function across its input elements."""
def generator():
yield [1]
yield [2]
yield [3]
yield [[4, 5, 6]]
dataset = dataset_ops.Dataset.from_generator(
generator, output_types=dtypes.int32)
batch_size = 4
dataset = dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
self.assertDatasetProduces(
dataset,
expected_error=(errors.InvalidArgumentError,
"number of elements does not match"))
def testMapAndBatchImplicitDispose(self):
    # Tests whether a map-and-batch dataset will be cleaned up correctly when
    # the pipeline does not run it to exhaustion.
# The pipeline is TensorSliceDataset -> RepeatDataset(1000) ->
# MapAndBatchDataset(f=square_3, batch_size=100).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
1000).apply(batching.map_and_batch(_map_fn, batch_size=100))
dataset = dataset.prefetch(5)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", 0),
("2", 5),
("3", 10),
("4", 90),
("5", 95),
("6", 99),
)
def testMapAndBatchMapError(self, threshold):
def raising_py_fn(i):
if i >= threshold:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(100).apply(
batching.map_and_batch(
lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
batch_size=10))
get_next = self.getNext(dataset)
for i in range(threshold // 10):
self.assertAllEqual([i * 10 + j for j in range(10)],
self.evaluate(get_next()))
for i in range(threshold // 10, 10):
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(
("1", False, dtypes.bool),
("2", -42, dtypes.int8),
("3", -42, dtypes.int16),
("4", -42, dtypes.int32),
("5", -42, dtypes.int64),
("6", 42, dtypes.uint8),
("7", 42, dtypes.uint16),
("8", 42.0, dtypes.float16),
("9", 42.0, dtypes.float32),
("10", 42.0, dtypes.float64),
("11", b"hello", dtypes.string),
)
def testMapAndBatchTypes(self, element, dtype):
def gen():
yield element
dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply(
batching.map_and_batch(lambda x: x, batch_size=10))
get_next = self.getNext(dataset)
for _ in range(10):
self.assertAllEqual([element for _ in range(10)],
self.evaluate(get_next()))
@parameterized.named_parameters(
("Identity", None, lambda x: x, None),
("Replicate", None, lambda x: (x, x), None),
("Swap", (None, None), lambda x, y: (y, x), None),
("Project", (None, None), lambda x, y: x, None),
)
def testShortCircuit(self, structure, map_fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat().apply(
batching.map_and_batch(map_fn, batch_size=10))
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = map_fn(
*self.evaluate(self.structuredElement(structure, shape=[10])))
else:
expected = map_fn(
self.evaluate(self.structuredElement(structure, shape=[10])))
self.assertAllEqual(expected, self.evaluate(get_next()))
def testShortCircuitCapturedInput(self):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat().apply(
batching.map_and_batch(lambda x: captured_t, batch_size=10))
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertAllEqual([42] * 10, self.evaluate(get_next()))
def testMapAndBatchControlFlow(self):
def map_fn(x):
previous_control_flow_v2_value = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
return_value = control_flow_ops.cond(x < 50, lambda: x + 1, lambda: x * x)
control_flow_util.ENABLE_CONTROL_FLOW_V2 = previous_control_flow_v2_value
return return_value
dataset = dataset_ops.Dataset.range(100).apply(
batching.map_and_batch(map_fn, batch_size=10))
get_next = self.getNext(dataset)
for i in range(10):
if i < 5:
self.assertAllEqual([i * 10 + j + 1 for j in range(10)],
self.evaluate(get_next()))
else:
self.assertAllEqual(
[((i * 10) + j) * ((i * 10) + j) for j in range(10)],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
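# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the pipeline described in the comments of `testMapAndBatch` above:
# `map_and_batch(f, b)` is the fused counterpart of `.map(f).batch(b)`, so an
# unfused pipeline such as the one below is semantically equivalent.
def _sketch_unfused_equivalent(batch_size, count):
  return dataset_ops.Dataset.range(10).repeat(count).map(
      math_ops.square).batch(batch_size)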
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/map_and_batch_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.rejection_resample()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class RejectionResampleTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("InitialDistributionKnown", True),
("InitialDistributionUnknown", False))
def testDistribution(self, initial_known):
classes = np.random.randint(5, size=(20000,)) # Uniformly sampled
target_dist = [0.9, 0.05, 0.05, 0.0, 0.0]
initial_dist = [0.2] * 5 if initial_known else None
classes = math_ops.cast(classes, dtypes.int64) # needed for Windows build.
dataset = dataset_ops.Dataset.from_tensor_slices(classes).shuffle(
200, seed=21).map(lambda c: (c, string_ops.as_string(c))).repeat()
get_next = self.getNext(
dataset.apply(
resampling.rejection_resample(
target_dist=target_dist,
initial_dist=initial_dist,
class_func=lambda c, _: c,
seed=27)))
returned = []
while len(returned) < 4000:
returned.append(self.evaluate(get_next()))
returned_classes, returned_classes_and_data = zip(*returned)
_, returned_data = zip(*returned_classes_and_data)
self.assertAllEqual([compat.as_bytes(str(c))
for c in returned_classes], returned_data)
total_returned = len(returned_classes)
class_counts = np.array([
len([True for v in returned_classes if v == c])
for c in range(5)])
returned_dist = class_counts / total_returned
self.assertAllClose(target_dist, returned_dist, atol=1e-2)
@parameterized.named_parameters(
("OnlyInitial", True),
("NotInitial", False))
def testEdgeCasesSampleFromInitialDataset(self, only_initial_dist):
init_dist = [0.5, 0.5]
target_dist = [0.5, 0.5] if only_initial_dist else [0.0, 1.0]
num_classes = len(init_dist)
# We don't need many samples to test that this works.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
def testRandomClasses(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
    # We don't need many samples to test a Dirac-delta target distribution.
num_samples = 100
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
dataset = dataset_ops.Dataset.from_tensor_slices(data_np)
# Apply a random mapping that preserves the data distribution.
def _remap_fn(_):
return math_ops.cast(random_ops.random_uniform([1]) * num_classes,
dtypes.int32)[0]
dataset = dataset.map(_remap_fn)
# Reshape distribution.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist))
get_next = self.getNext(dataset)
returned = []
with self.assertRaises(errors.OutOfRangeError):
while True:
returned.append(self.evaluate(get_next()))
classes, _ = zip(*returned)
bincount = np.bincount(
np.array(classes),
minlength=num_classes).astype(np.float32) / len(classes)
self.assertAllClose(target_dist, bincount, atol=1e-2)
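# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the transformation exercised above, assuming elements are (class, data)
# pairs as in `testDistribution`: `rejection_resample` drops elements so that
# the empirical class distribution of the output approaches `target_dist`.
def _sketch_resample(dataset, target_dist, initial_dist=None, seed=None):
  return dataset.apply(
      resampling.rejection_resample(
          class_func=lambda c, _: c,
          target_dist=target_dist,
          initial_dist=initial_dist,
          seed=seed))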
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/rejection_resample_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.unique()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class UniqueTest(test_base.DatasetTestBase):
def _testSimpleHelper(self, dtype, test_cases):
"""Test the `unique()` transformation on a list of test cases.
Args:
dtype: The `dtype` of the elements in each test case.
test_cases: A list of pairs of lists. The first component is the test
input that will be passed to the transformation; the second component
is the expected sequence of outputs from the transformation.
"""
# The `current_test_case` will be updated when we loop over `test_cases`
# below; declare it here so that the generator can capture it once.
current_test_case = []
dataset = dataset_ops.Dataset.from_generator(lambda: current_test_case,
dtype).apply(unique.unique())
for test_case, expected in test_cases:
current_test_case = test_case
self.assertDatasetProduces(dataset, [
compat.as_bytes(element) if dtype == dtypes.string else element
for element in expected
])
@test_util.run_deprecated_v1
def testSimpleInt(self):
for dtype in [dtypes.int32, dtypes.int64]:
self._testSimpleHelper(dtype, [
([], []),
([1], [1]),
([1, 1, 1, 1, 1, 1, 1], [1]),
([1, 2, 3, 4], [1, 2, 3, 4]),
([1, 2, 4, 3, 2, 1, 2, 3, 4], [1, 2, 4, 3]),
([[1], [1, 1], [1, 1, 1]], [[1], [1, 1], [1, 1, 1]]),
([[1, 1], [1, 1], [2, 2], [3, 3], [1, 1]], [[1, 1], [2, 2], [3, 3]]),
])
@test_util.run_deprecated_v1
def testSimpleString(self):
self._testSimpleHelper(dtypes.string, [
([], []),
(["hello"], ["hello"]),
(["hello", "hello", "hello"], ["hello"]),
(["hello", "world"], ["hello", "world"]),
(["foo", "bar", "baz", "baz", "bar", "foo"], ["foo", "bar", "baz"]),
])
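# A minimal sketch (hypothetical helper, not part of the original test suite)
# showing the transformation under test outside the generator-based harness:
# `unique()` keeps only the first occurrence of each element, so the dataset
# below yields 1, 2, 3 in that order.
def _sketch_unique():
  return dataset_ops.Dataset.from_tensor_slices([1, 2, 1, 3, 2]).apply(
      unique.unique())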
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/unique_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.scan()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ScanTest(test_base.DatasetTestBase):
def _counting_dataset(self, start, scan_fn):
return dataset_ops.Dataset.from_tensors(0).repeat().apply(
scan_ops.scan(start, scan_fn))
def testCount(self):
def make_scan_fn(step):
return lambda state, _: (state + step, state)
def dataset_fn(start, step, take):
return self._counting_dataset(start, make_scan_fn(step)).take(take)
for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),
(10, 2, 10), (10, -1, 10), (10, -2,
10)]:
next_element = self.getNext(dataset_fn(start_val, step_val, take_val))
for expected, _ in zip(
itertools.count(start_val, step_val), range(take_val)):
self.assertEqual(expected, self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testFibonacci(self):
data = dataset_ops.Dataset.from_tensors(1).repeat(None).apply(
scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1])))
next_element = self.getNext(data)
self.assertEqual(1, self.evaluate(next_element()))
self.assertEqual(1, self.evaluate(next_element()))
self.assertEqual(2, self.evaluate(next_element()))
self.assertEqual(3, self.evaluate(next_element()))
self.assertEqual(5, self.evaluate(next_element()))
self.assertEqual(8, self.evaluate(next_element()))
def testSparseCount(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def make_scan_fn(step):
return lambda state, _: (_sparse(state.values[0] + step), state)
def dataset_fn(start, step, take):
return self._counting_dataset(_sparse(start),
make_scan_fn(step)).take(take)
for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),
(10, 2, 10), (10, -1, 10), (10, -2,
10)]:
next_element = self.getNext(dataset_fn(start_val, step_val, take_val))
for expected, _ in zip(
itertools.count(start_val, step_val), range(take_val)):
self.assertEqual(expected, self.evaluate(next_element()).values[0])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTensorArraySimple(self):
def scan_fn(ta, x):
return (ta.write(ta.size(), x), ta.stack())
start = tensor_array_ops.TensorArray(
size=0,
element_shape=[],
dtype=dtypes.int64,
dynamic_size=True)
start = start.write(0, -1)
ds = dataset_ops.Dataset.range(5).apply(scan_ops.scan(start, scan_fn))
self.assertDatasetProduces(
ds,
expected_output=[
[-1],
[-1, 0],
[-1, 0, 1],
[-1, 0, 1, 2],
[-1, 0, 1, 2, 3],
],
requires_initialization=True,
num_test_iterations=2)
def testTensorArrayWithCondReset(self):
def empty():
return tensor_array_ops.TensorArray(
size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True)
def scan_fn(ta, x):
updated = ta.write(ta.size(), x)
next_iter = control_flow_ops.cond(
math_ops.equal(x % 3, 0), empty, lambda: updated)
return (next_iter, updated.stack())
start = empty()
start = start.write(0, -1)
ds = dataset_ops.Dataset.range(6).apply(scan_ops.scan(start, scan_fn))
self.assertDatasetProduces(
ds,
expected_output=[
[-1, 0],
[1],
[1, 2],
[1, 2, 3],
[4],
[4, 5],
],
requires_initialization=True,
num_test_iterations=2)
def testTensorArrayWithCondResetByExternalCaptureBreaks(self):
empty_ta = tensor_array_ops.TensorArray(
size=0, element_shape=[], dtype=dtypes.int64, dynamic_size=True)
def scan_fn(ta, x):
updated = ta.write(ta.size(), x)
# Here, capture empty_ta from outside the function. However, it may be
# either a TF1-style TensorArray or an Eager-style TensorArray.
next_iter = control_flow_ops.cond(
math_ops.equal(x % 3, 0), lambda: empty_ta, lambda: updated)
return (next_iter, updated.stack())
start = empty_ta
start = start.write(0, -1)
with self.assertRaisesRegexp(
NotImplementedError,
r"construct a new TensorArray inside the function"):
dataset_ops.Dataset.range(6).apply(scan_ops.scan(start, scan_fn))
def testChangingStateShape(self):
# Test the fixed-point shape invariant calculations: start with
# initial values with known shapes, and use a scan function that
# changes the size of the state on each element.
def _scan_fn(state, input_value):
# Statically known rank, but dynamic length.
ret_longer_vector = array_ops.concat([state[0], state[0]], 0)
# Statically unknown rank.
ret_larger_rank = array_ops.expand_dims(state[1], 0)
return (ret_longer_vector, ret_larger_rank), (state, input_value)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(5).apply(
scan_ops.scan(([0], 1), _scan_fn))
self.assertEqual(
[None], dataset_ops.get_legacy_output_shapes(dataset)[0][0].as_list())
self.assertIs(
None, dataset_ops.get_legacy_output_shapes(dataset)[0][1].ndims)
self.assertEqual(
[], dataset_ops.get_legacy_output_shapes(dataset)[1].as_list())
next_element = self.getNext(dataset)
for i in range(5):
(longer_vector_val, larger_rank_val), _ = self.evaluate(next_element())
self.assertAllEqual([0] * (2**i), longer_vector_val)
self.assertAllEqual(np.array(1, ndmin=i), larger_rank_val)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testIncorrectStateType(self):
def _scan_fn(state, _):
return constant_op.constant(1, dtype=dtypes.int64), state
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegexp(
TypeError,
"The element types for the new state must match the initial state."):
dataset.apply(
scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))
def testIncorrectReturnType(self):
def _scan_fn(unused_state, unused_input_value):
return constant_op.constant(1, dtype=dtypes.int64)
dataset = dataset_ops.Dataset.range(10)
with self.assertRaisesRegexp(
TypeError,
"The scan function must return a pair comprising the new state and the "
"output value."):
dataset.apply(
scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))
def testPreserveCardinality(self):
def scan_fn(state, val):
def py_fn(_):
raise StopIteration()
return state, script_ops.py_func(py_fn, [val], dtypes.int64)
dataset = dataset_ops.Dataset.from_tensors(0).apply(
scan_ops.scan(constant_op.constant(1), scan_fn))
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
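# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the scan contract used throughout this file: the scan function maps
# (old_state, input_element) to (new_state, output_element). With an initial
# state of 0 and addition, the dataset below yields the running sums
# 0, 1, 3, 6, 10 for the inputs 0 through 4.
def _sketch_running_sum():
  return dataset_ops.Dataset.range(5).apply(
      scan_ops.scan(np.int64(0), lambda state, x: (state + x, state + x)))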
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/scan_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline statistics gathering ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.experimental.ops import stats_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class StatsDatasetTest(stats_dataset_test_base.StatsDatasetTestBase):
def testBytesProduced(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply(
stats_ops.bytes_produced_stats("bytes_produced"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
expected_sum = 0.0
for i in range(100):
self.assertAllEqual(
np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "bytes_produced", float(i + 1),
i + 2)
expected_sum += i * 8.0
self.assertStatisticsHasSum(handle, "bytes_produced", expected_sum, i + 2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "bytes_produced", 100.0, 101)
self.assertStatisticsHasSum(handle, "bytes_produced", expected_sum, 101)
def testLatencyStats(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency", float(i + 1),
i + 2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency", 100.0, 101)
def testPrefetchBufferUtilization(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(-1)
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertAllEqual(
np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle,
self.regexForNodeName("PrefetchDataset", "buffer_utilization"),
float(i + 1),
3 * i + 4,
offset=2)
self.assertStatisticsContains(
handle, self.regexForNodeName("PrefetchDataset", "buffer_capacity"),
3 * i + 4)
self.assertStatisticsContains(
handle,
self.regexForNodeName("PrefetchDataset", "buffer_size"),
3 * i + 4,
offset=1)
self.assertStatisticsHasRange(
handle,
self.regexForNodeName("PrefetchDataset", "buffer_utilization"),
0,
1,
3 * i + 4,
offset=2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle,
self.regexForNodeName("PrefetchDataset", "buffer_utilization"),
100,
301,
offset=2)
def testPrefetchBufferScalars(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(10).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).prefetch(1)
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertAllEqual(
np.array([i] * i, dtype=np.int64), self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasScalarValue(
handle, self.regexForNodeName("PrefetchDataset", "buffer_capacity"),
1, 3 * i + 4)
self.assertStatisticsHasScalarValue(
handle,
self.regexForNodeName("PrefetchDataset", "buffer_size"),
1,
3 * i + 4,
offset=1)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testFilteredElementsStats(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(101).filter(
lambda x: math_ops.equal(math_ops.mod(x, 3), 0))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(34):
self.assertEqual(i * 3, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
if i != 0:
self.assertStatisticsHasScalarValue(
handle, self.regexForNodeName("FilterDataset", "dropped_elements"),
float(i * 2))
self.assertStatisticsHasScalarValue(
handle, self.regexForNodeName("FilterDataset", "filtered_elements"),
float(i + 1))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasScalarValue(
handle, self.regexForNodeName("FilterDataset", "dropped_elements"),
67.0)
self.assertStatisticsHasScalarValue(
handle, self.regexForNodeName("FilterDataset", "filtered_elements"),
34.0)
def testReinitialize(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
for j in range(5):
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency",
float((j * 100) + i + 1),
(j * 100) + i + 2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency", (j + 1) * 100.0,
(j * 100) + 101)
def testNoAggregatorRegistered(self):
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testMultipleTags(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency")).apply(
stats_ops.latency_stats("record_latency_2"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, "record_latency", float(i + 1), 2 * i + 3, offset=1)
self.assertStatisticsHasCount(handle, "record_latency_2", float(i + 1),
2 * i + 3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, "record_latency", 100.0, 201, offset=1)
self.assertStatisticsHasCount(handle, "record_latency_2", 100.0, 201)
def testRepeatedTags(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency")).apply(
stats_ops.latency_stats("record_latency"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency",
float(2 * (i + 1)), 2 * i + 3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency", 200.0, 201)
def testMultipleIteratorsSameAggregator(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element1 = self.getNext(dataset, requires_initialization=True)
next_element2 = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency",
float(2 * (i + 1)), 2 * i + 3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element1())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element2())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(handle, "record_latency", 200.0, 201)
def testMultipleDatasetWithPrefixes(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
dataset = self.datasetExperimentalStats(
dataset, aggregator, prefix="dataset1")
dataset2 = dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats("record_latency"))
dataset2 = self.datasetExperimentalStats(
dataset2, aggregator, prefix="dataset2")
next_element1 = self.getNext(dataset, requires_initialization=True)
next_element2 = self.getNext(dataset2, requires_initialization=True)
for i in range(100):
self.assertEqual(i * 2, self.evaluate(next_element1() + next_element2()))
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, "dataset1::record_latency", float(i + 1), 2 * i + 3, offset=1)
self.assertStatisticsHasCount(handle, "dataset2::record_latency",
float(i + 1), 2 * i + 3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element1())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element2())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, "dataset1::record_latency", 100.0, 201, offset=1)
self.assertStatisticsHasCount(handle, "dataset2::record_latency", 100.0,
201)
def testMultiplePrefetchStats(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.range(10).prefetch(
2).filter(lambda x: math_ops.equal(math_ops.mod(x, 2), 0)).prefetch(1)
dataset = self.datasetExperimentalStats(dataset, aggregator)
next_element = self.getNext(dataset, requires_initialization=True)
for i in range(5):
self.assertEqual(i * 2, self.evaluate(next_element()))
handle = self.getHandle(aggregator)
      # TODO(shivaniagarwal): we use the exact names of the prefetch nodes
      # rather than a regex in order to differentiate between the two
      # prefetches. This might break in the future, at which point it would be
      # best to disable this test.
self.assertStatisticsHasScalarValue(
handle, "PrefetchDataset/_5::buffer_capacity", 2)
self.assertStatisticsContains(handle, "PrefetchDataset/_5::buffer_size")
self.assertStatisticsHasScalarValue(
handle, "PrefetchDataset/_8::buffer_capacity", 1)
self.assertStatisticsContains(handle, "PrefetchDataset/_8::buffer_size")
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
class ThreadUtilizationStatsTest(stats_dataset_test_base.StatsDatasetTestBase):
def testMapBufferUtilization(self):
def dataset_fn():
return dataset_ops.Dataset.range(10).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),
num_parallel_calls=4)
self.parallelCallsStats(
dataset_fn, {"ParallelMapDataset"}, 10, function_processing_time=True)
def testMapAutoTuneBufferUtilization(self):
def dataset_fn():
return dataset_ops.Dataset.range(10).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x])),
num_parallel_calls=dataset_ops.AUTOTUNE)
self.parallelCallsStats(
dataset_fn, {"ParallelMapDataset"}, 10, function_processing_time=True)
def testInterleaveAutoTuneBufferUtilization(self):
def dataset_fn():
def interleave_fn(_):
return dataset_ops.Dataset.range(
10).map(lambda x: array_ops.tile([x], ops.convert_to_tensor([x])))
return dataset_ops.Dataset.range(1).interleave(
interleave_fn,
cycle_length=1,
num_parallel_calls=dataset_ops.AUTOTUNE)
self.parallelCallsStats(dataset_fn, {"ParallelInterleaveDatasetV2"}, 10)
def testMapAndBatchAutoTuneBufferUtilization(self):
def dataset_fn():
return dataset_ops.Dataset.range(100).apply(
batching.map_and_batch(
lambda x: array_ops.tile([x], ops.convert_to_tensor([2])),
num_parallel_calls=dataset_ops.AUTOTUNE,
batch_size=16))
num_output = 100 // 16 + 1
self.parallelCallsStats(
dataset_fn, {"ExperimentalMapAndBatchDataset"},
num_output,
check_elements=False,
function_processing_time=True)
class FeatureStatsDatasetTest(
stats_dataset_test_base.StatsDatasetTestBase,
reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase):
def testFeaturesStats(self):
num_epochs = 5
total_records = num_epochs * self._num_records
batch_size = 2
def dataset_fn():
return self.make_batch_feature(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size,
shuffle=True,
shuffle_seed=5,
drop_final_batch=False)
num_output = total_records // batch_size
if total_records % batch_size:
num_output = total_records // batch_size + 1
self.parallelCallsStats(
dataset_fn, {"ExperimentalParseExampleDataset"},
num_output,
check_elements=False)
aggregator = stats_aggregator.StatsAggregator()
dataset = self.datasetExperimentalStats(
dataset_fn(), aggregator, prefix="record_stats")
next_element = self.getNext(dataset, requires_initialization=True)
for _ in range(num_output):
self.evaluate(next_element())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle,
self.regexForNodeName("record_stats::ExperimentalParseExampleDataset",
"features_count"), total_records)
self.assertStatisticsHasCount(
handle,
self.regexForNodeName("record_stats::ExperimentalParseExampleDataset",
"feature_values_count"), total_records)
self.assertStatisticsHasSum(
handle,
self.regexForNodeName("record_stats::ExperimentalParseExampleDataset",
"features_count"), total_records * 4)
self.assertStatisticsHasSum(
handle,
self.regexForNodeName("record_stats::ExperimentalParseExampleDataset",
"feature_values_count"),
self._sum_keywords(1) * num_epochs + 3 * total_records)
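# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the instrumentation pattern used above: `latency_stats` and
# `bytes_produced_stats` are applied as ordinary dataset transformations, and
# the recorded statistics only become visible once the dataset is associated
# with a `StatsAggregator` (as done in these tests via
# `datasetExperimentalStats`).
def _sketch_instrumented_pipeline():
  return dataset_ops.Dataset.range(100).apply(
      stats_ops.bytes_produced_stats("bytes_produced")).apply(
          stats_ops.latency_stats("record_latency"))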
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/stats_dataset_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import get_single_element
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class GetSingleElementTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(
("Zero", 0, 1),
("Five", 5, 1),
("Ten", 10, 1),
("Empty", 100, 1, errors.InvalidArgumentError, "Dataset was empty."),
("MoreThanOne", 0, 2, errors.InvalidArgumentError,
"Dataset had more than one element."),
)
def testGetSingleElement(self, skip, take, error=None, error_msg=None):
def make_sparse(x):
x_1d = array_ops.reshape(x, [1])
x_2d = array_ops.reshape(x, [1, 1])
return sparse_tensor.SparseTensor(x_2d, x_1d, x_1d)
dataset = dataset_ops.Dataset.range(100).skip(
skip).map(lambda x: (x * x, make_sparse(x))).take(take)
if error is None:
dense_val, sparse_val = self.evaluate(
get_single_element.get_single_element(dataset))
self.assertEqual(skip * skip, dense_val)
self.assertAllEqual([[skip]], sparse_val.indices)
self.assertAllEqual([skip], sparse_val.values)
self.assertAllEqual([skip], sparse_val.dense_shape)
else:
with self.assertRaisesRegexp(error, error_msg):
self.evaluate(get_single_element.get_single_element(dataset))
def testWindow(self):
"""Test that `get_single_element()` can consume a nested dataset."""
def flat_map_func(ds):
batched = ds.batch(2)
element = get_single_element.get_single_element(batched)
return dataset_ops.Dataset.from_tensors(element)
dataset = dataset_ops.Dataset.range(10).window(2).flat_map(flat_map_func)
self.assertDatasetProduces(
dataset, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]])
def testSideEffect(self):
counter_var = variables.Variable(0)
def increment_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(1).map(increment_fn)
@function.defun
def fn():
_ = get_single_element.get_single_element(dataset_fn())
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 1)
def testAutomaticControlDependencies(self):
counter_var = variables.Variable(1)
def increment_fn(x):
counter_var.assign(counter_var + 1)
return x
def multiply_fn(x):
counter_var.assign(counter_var * 2)
return x
def dataset1_fn():
return dataset_ops.Dataset.range(1).map(increment_fn)
def dataset2_fn():
return dataset_ops.Dataset.range(1).map(multiply_fn)
@function.defun
def fn():
_ = get_single_element.get_single_element(dataset1_fn())
_ = get_single_element.get_single_element(dataset2_fn())
return "hello"
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), b"hello")
self.assertEqual(self.evaluate(counter_var), 4)
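# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the API under test: `get_single_element` returns the only element of a
# dataset as a tensor (or nested structure of tensors) and raises
# InvalidArgumentError when the dataset is empty or has more than one element,
# as the parameterized cases above check.
def _sketch_single_element():
  dataset = dataset_ops.Dataset.from_tensors(42)
  return get_single_element.get_single_element(dataset)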
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/get_single_element_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.TFRecordWriter`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.ops import writers
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.eager import function
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class TFRecordWriterTest(test_base.DatasetTestBase):
def setUp(self):
super(TFRecordWriterTest, self).setUp()
self._num_records = 7
def writer_fn(self, filename, compression_type=""):
input_dataset = readers.TFRecordDataset([filename], compression_type)
return writers.TFRecordWriter(self._outputFilename(),
compression_type).write(input_dataset)
def _record(self, i):
return compat.as_bytes("Record %d" % (i))
def _createFile(self, options=None):
filename = self._inputFilename()
writer = python_io.TFRecordWriter(filename, options)
for i in range(self._num_records):
writer.write(self._record(i))
writer.close()
return filename
def _inputFilename(self):
return os.path.join(self.get_temp_dir(), "tf_record.in.txt")
def _outputFilename(self):
return os.path.join(self.get_temp_dir(), "tf_record.out.txt")
def testWrite(self):
self.evaluate(self.writer_fn(self._createFile()))
for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
self.assertAllEqual(self._record(i), r)
def testWriteZLIB(self):
options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB)
self.evaluate(
self.writer_fn(self._createFile(options), compression_type="ZLIB"))
for i, r in enumerate(
tf_record.tf_record_iterator(self._outputFilename(), options=options)):
self.assertAllEqual(self._record(i), r)
def testWriteGZIP(self):
options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP)
self.evaluate(
self.writer_fn(self._createFile(options), compression_type="GZIP"))
for i, r in enumerate(
tf_record.tf_record_iterator(self._outputFilename(), options=options)):
self.assertAllEqual(self._record(i), r)
def testFailDataset(self):
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write("whoops")
def testFailDType(self):
input_dataset = dataset_ops.Dataset.from_tensors(10)
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)
def testFailShape(self):
input_dataset = dataset_ops.Dataset.from_tensors([["hello"], ["world"]])
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)
def testSideEffect(self):
def writer_fn():
input_dataset = readers.TFRecordDataset(self._createFile())
return writers.TFRecordWriter(self._outputFilename()).write(input_dataset)
@function.defun
def fn():
_ = writer_fn()
return "hello"
self.assertEqual(self.evaluate(fn()), b"hello")
for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
self.assertAllEqual(self._record(i), r)
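# A minimal sketch (hypothetical helper, not part of the original test suite)
# of the round trip exercised above: a dataset of scalar byte strings is
# written with `TFRecordWriter` and can be read back with `TFRecordDataset`.
def _sketch_round_trip(filename):
  records = dataset_ops.Dataset.from_tensor_slices(
      [compat.as_bytes("Record %d" % i) for i in range(3)])
  write_op = writers.TFRecordWriter(filename).write(records)
  return write_op, readers.TFRecordDataset([filename])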
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/tf_record_writer_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import threading
import time
import numpy as np
from six.moves import zip_longest
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ParallelInterleaveTest(test_base.DatasetTestBase):
def setUp(self):
self.error = None
self.repeat_count = 2
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
self.read_coordination_events = {}
self.write_coordination_events = {}
# input values [4, 5, 6] are the common case for the tests; set defaults
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i] = threading.Event()
def dataset_fn(self, input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
def map_py_fn(x):
self.write_coordination_events[x].wait()
self.write_coordination_events[x].clear()
self.read_coordination_events[x].release()
if self.error:
err = self.error
self.error = None
raise err # pylint: disable=raising-bad-type
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
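  # A minimal sketch (hypothetical helper, not part of the original suite) of
  # what `_interleave` produces: with cycle_length=2 and block_length=1 the two
  # currently open lists are visited round-robin, one element at a time, so
  # [[1, 1], [2, 2, 2]] interleaves to [1, 2, 1, 2, 2].
  def _sketch_interleave(self):
    return list(self._interleave([[1, 1], [2, 2, 2]], 2, 1))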
def testPythonImplementation(self):
input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]
# Cycle length 1 acts like `Dataset.flat_map()`.
expected_elements = itertools.chain(*input_lists)
for expected, produced in zip(expected_elements,
self._interleave(input_lists, 1, 1)):
self.assertEqual(expected, produced)
# Cycle length > 1.
expected_elements = [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5,
6, 5, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationBlockLength(self):
input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2
expected_elements = [
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5,
5, 6, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 2))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationEmptyLists(self):
input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [],
[6, 6, 6, 6, 6, 6]]
expected_elements = [
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0):
    # cycle_length=1, block_length=1 acts like `Dataset.interleave()` and
# `Dataset.flat_map()` and is single-threaded. No synchronization required.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=1,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for expected_element in self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1):
self.write_coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testSingleThreaded(self):
self._testSingleThreaded()
def testSingleThreadedSloppy(self):
self._testSingleThreaded(sloppy=True)
def testSingleThreadedPrefetch1Itr(self):
self._testSingleThreaded(prefetch_input_elements=1)
def testSingleThreadedPrefetch1ItrSloppy(self):
self._testSingleThreaded(prefetch_input_elements=1, sloppy=True)
def testSingleThreadedRagged(self):
    # Tests a sequence with wildly different numbers of elements per iterator.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([3, 7, 4]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=1))
# Add coordination values for 3 and 7
self.read_coordination_events[3] = threading.Semaphore(0)
self.write_coordination_events[3] = threading.Event()
self.read_coordination_events[7] = threading.Semaphore(0)
self.write_coordination_events[7] = threading.Event()
for expected_element in self._interleave(
[[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1):
self.write_coordination_events[expected_element].set()
output = self.evaluate(next_element())
self.assertEqual(expected_element * expected_element, output)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _testTwoThreadsNoContention(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContention(self):
self._testTwoThreadsNoContention()
def testTwoThreadsNoContentionSloppy(self):
self._testTwoThreadsNoContention(sloppy=True)
def _testTwoThreadsNoContentionWithRaces(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionWithRaces(self):
self._testTwoThreadsNoContentionWithRaces()
def testTwoThreadsNoContentionWithRacesSloppy(self):
self._testTwoThreadsNoContentionWithRaces(sloppy=True)
def _testTwoThreadsNoContentionBlockLength(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionBlockLength(self):
self._testTwoThreadsNoContentionBlockLength()
def testTwoThreadsNoContentionBlockLengthSloppy(self):
self._testTwoThreadsNoContentionBlockLength(sloppy=True)
def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionWithRacesAndBlocking(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking()
def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True)
def _testEmptyInput(self, sloppy=False):
# Empty input.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testEmptyInput(self):
self._testEmptyInput()
def testEmptyInputSloppy(self):
self._testEmptyInput(sloppy=True)
def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False):
# Non-empty input leading to empty output.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([0, 0, 0]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testNonEmptyInputIntoEmptyOutputs(self):
self._testNonEmptyInputIntoEmptyOutputs()
def testNonEmptyInputIntoEmptyOutputsSloppy(self):
self._testNonEmptyInputIntoEmptyOutputs(sloppy=True)
def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1):
race_indices = {2, 8, 14} # Sequence points when sloppy mode has race conds
# Mixture of non-empty and empty interleaved datasets.
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 0, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)):
self.write_coordination_events[expected_element].set()
# First event starts the worker threads. Additionally, when running the
# sloppy case with prefetch_input_elements=0, we get stuck if we wait
# for the read coordination event for certain event orderings in the
# presence of finishing iterators.
if done_first_event and not (sloppy and (i in race_indices)):
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event or (sloppy and (i in race_indices)):
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
def testPartiallyEmptyOutputs(self):
self._testPartiallyEmptyOutputs()
def testPartiallyEmptyOutputsSloppy(self):
self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0)
def testDelayedOutputSloppy(self):
# Explicitly control the sequence of events to ensure we correctly avoid
# head-of-line blocking.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=0))
mis_ordering = [
4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6, 6,
5, 5, 5, 5, 6, 6
]
for element in mis_ordering:
self.write_coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(next_element()))
self.assertTrue(self.read_coordination_events[element].acquire(False))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testBlockLengthWithContentionSloppy(self):
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=1))
# Test against a generating sequence that differs from the uncontended
# case, in order to prove sloppy correctness.
for i, expected_element in enumerate(
self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count,
cycle_length=2,
block_length=3)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _testEarlyExit(self, sloppy=False):
# Exiting without consuming all input should not block
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=3,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
for i in range(4, 7):
self.write_coordination_events[i].set()
elem = self.evaluate(next_element()) # Start all workers
# Allow the one successful worker to progress beyond the py_func again.
elem = int(math.sqrt(elem))
self.write_coordination_events[elem].set()
self.read_coordination_events[elem].acquire()
# Allow the prefetch to succeed
for i in range(4, 7):
self.read_coordination_events[i].acquire()
self.write_coordination_events[i].set()
def testEarlyExit(self):
self._testEarlyExit()
def testEarlyExitSloppy(self):
self._testEarlyExit(sloppy=True)
def _testTooManyReaders(self, sloppy=False):
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
return dataset
dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
dataset = dataset.repeat(self.repeat_count)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
get_next = self.getNext(dataset)
output_values = []
for _ in range(30):
output_values.append(self.evaluate(get_next()))
expected_values = self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
self.assertItemsEqual(output_values, expected_values)
def testTooManyReaders(self):
self._testTooManyReaders()
def testTooManyReadersSloppy(self):
self._testTooManyReaders(sloppy=True)
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1))
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testErrorsInOutputFn(self):
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
except_on_element_indices = set([3])
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if i in except_on_element_indices:
self.error = ValueError()
self.write_coordination_events[expected_element].set()
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
self.write_coordination_events[expected_element].set()
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrorsInInputFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).map(
map_fn).repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrorsInInterleaveFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
y = script_ops.py_func(map_py_fn, [x], x.dtype)
dataset = dataset.repeat(y)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testShutdownRace(self):
dataset = dataset_ops.Dataset.range(20)
map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1))
dataset = dataset.apply(
interleave_ops.parallel_interleave(
map_fn,
cycle_length=3,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
dataset = dataset.batch(32)
results = []
for _ in range(2):
elements = []
next_element = self.getNext(dataset)
try:
while True:
elements.extend(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
results.append(elements)
self.assertAllEqual(results[0], results[1])
if __name__ == "__main__":
test.main()
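# --- Illustrative sketch, not part of the original test file ---
# The tests above drive `interleave_ops.parallel_interleave` through
# carefully sequenced coordination events.  The hedged sketch below builds
# the same kind of pipeline without any synchronization: each input value
# becomes a sub-dataset, and `sloppy=True` only relaxes the output order.
# It reuses the `dataset_ops` and `interleave_ops` modules already imported
# at the top of this file; the helper name is hypothetical.
def _parallel_interleave_sketch(sloppy=False):
  """Returns a small dataset built with parallel_interleave."""
  def interleave_fn(x):
    # Each input value x becomes a sub-dataset that repeats x, x times.
    return dataset_ops.Dataset.from_tensors(x).repeat(x)
  dataset = dataset_ops.Dataset.from_tensor_slices(np.int64([4, 5, 6]))
  return dataset.apply(
      interleave_ops.parallel_interleave(
          interleave_fn, cycle_length=2, block_length=1, sloppy=sloppy))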
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.make_tf_record_dataset()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MakeTFRecordDatasetTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase):
def _read_test(self, batch_size, num_epochs, file_index=None,
num_parallel_reads=1, drop_final_batch=False, parser_fn=False):
if file_index is None:
file_pattern = self.test_filenames
else:
file_pattern = self.test_filenames[file_index]
if parser_fn:
fn = lambda x: string_ops.substr(x, 1, 999)
else:
fn = None
outputs = self.getNext(
readers.make_tf_record_dataset(
file_pattern=file_pattern,
num_epochs=num_epochs,
batch_size=batch_size,
parser_fn=fn,
num_parallel_reads=num_parallel_reads,
drop_final_batch=drop_final_batch,
shuffle=False))
self._verify_records(
outputs,
batch_size,
file_index,
num_epochs=num_epochs,
interleave_cycle_length=num_parallel_reads,
drop_final_batch=drop_final_batch,
use_parser_fn=parser_fn)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(outputs())
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
# Basic test: read from file 0.
self._read_test(batch_size, num_epochs, 0)
# Basic test: read from file 1.
self._read_test(batch_size, num_epochs, 1)
# Basic test: read from both files.
self._read_test(batch_size, num_epochs)
# Basic test: read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8)
def testDropFinalBatch(self):
for batch_size in [1, 2, 10]:
for num_epochs in [1, 3]:
# Read from file 0.
self._read_test(batch_size, num_epochs, 0, drop_final_batch=True)
# Read from both files.
self._read_test(batch_size, num_epochs, drop_final_batch=True)
# Read from both files, with parallel reads.
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
drop_final_batch=True)
def testParserFn(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for drop_final_batch in [False, True]:
self._read_test(batch_size, num_epochs, parser_fn=True,
drop_final_batch=drop_final_batch)
self._read_test(batch_size, num_epochs, num_parallel_reads=8,
parser_fn=True, drop_final_batch=drop_final_batch)
def _shuffle_test(self, batch_size, num_epochs, num_parallel_reads=1,
seed=None):
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size,
num_parallel_reads=num_parallel_reads,
shuffle=True,
shuffle_seed=seed)
next_element = self.getNext(dataset)
first_batches = []
try:
while True:
first_batches.append(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
next_element = self.getNext(dataset)
second_batches = []
try:
while True:
second_batches.append(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
self.assertEqual(len(first_batches), len(second_batches))
if seed is not None:
      # With a fixed seed, both passes should produce identical batches.
for i in range(len(first_batches)):
self.assertAllEqual(first_batches[i], second_batches[i])
expected = []
for f in range(self._num_files):
for r in range(self._num_records):
expected.extend([self._record(f, r)] * num_epochs)
for batches in (first_batches, second_batches):
actual = []
for b in batches:
actual.extend(b)
self.assertAllEqual(sorted(expected), sorted(actual))
def testShuffle(self):
for batch_size in [1, 2]:
for num_epochs in [1, 3]:
for num_parallel_reads in [1, 2]:
# Test that all expected elements are produced
self._shuffle_test(batch_size, num_epochs, num_parallel_reads)
# Test that elements are produced in a consistent order if
# you specify a seed.
self._shuffle_test(batch_size, num_epochs, num_parallel_reads,
seed=21345)
def testIndefiniteRepeatShapeInference(self):
dataset = readers.make_tf_record_dataset(
file_pattern=self.test_filenames, num_epochs=None, batch_size=32)
for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)):
self.assertEqual(32, shape[0])
if __name__ == "__main__":
test.main()
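# --- Illustrative sketch, not part of the original test file ---
# A hedged example of the API exercised above: build a batched TFRecord
# pipeline with an optional per-record `parser_fn`.  It reuses the
# `readers` and `string_ops` modules already imported at the top of this
# file; the helper name and arguments are placeholders.
def _make_tf_record_dataset_sketch(file_pattern, batch_size=2):
  """Returns a shuffled, batched dataset of (optionally parsed) records."""
  return readers.make_tf_record_dataset(
      file_pattern=file_pattern,
      batch_size=batch_size,
      num_epochs=1,
      # Strip the first byte of every record, as the parser_fn tests do.
      parser_fn=lambda x: string_ops.substr(x, 1, 999),
      num_parallel_reads=2,
      shuffle=True,
      shuffle_seed=42,
      drop_final_batch=False)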
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/make_tf_record_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `ShuffleAndRepeatFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ShuffleAndRepeatFusionTest(test_base.DatasetTestBase):
def testShuffleAndRepeatFusion(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["ShuffleAndRepeat"])).shuffle(10).repeat(2)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.shuffle_and_repeat_fusion = True
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
for _ in range(2):
results = []
for _ in range(10):
results.append(self.evaluate(get_next()))
self.assertAllEqual([x for x in range(10)], sorted(results))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
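# --- Illustrative sketch, not part of the original test file ---
# The fusion above is opt-in through tf.data options.  This hedged sketch
# builds the same shuffle-then-repeat pipeline and enables only the
# `shuffle_and_repeat_fusion` rewrite, without the `assert_next` check.
# It reuses the `dataset_ops` module imported at the top of this file.
def _shuffle_and_repeat_fusion_sketch():
  dataset = dataset_ops.Dataset.range(10).shuffle(10).repeat(2)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.shuffle_and_repeat_fusion = True
  return dataset.with_options(options)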
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/shuffle_and_repeat_fusion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `FilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _filter_fusion_test_cases():
"""Generates test cases for the FilterFusion optimization."""
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
tests = []
filters = [take_all, is_zero, greater]
identity = lambda x: x
for x, predicate_1 in enumerate(filters):
for y, predicate_2 in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), identity,
[predicate_1, predicate_2]))
for z, predicate_3 in enumerate(filters):
tests.append(("Mixed{}{}{}".format(x, y, z), identity,
[predicate_1, predicate_2, predicate_3]))
take_all_multiple = lambda x, y: constant_op.constant(True)
# Multi output
tests.append(("Multi1", lambda x: (x, x),
[take_all_multiple, take_all_multiple]))
tests.append(("Multi2", lambda x: (x, 2), [
take_all_multiple,
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
]))
return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class FilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(*_filter_fusion_test_cases())
def testFilterFusion(self, map_function, predicates):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Filter",
"MemoryCacheImpl"])).map(map_function)
for predicate in predicates:
dataset = dataset.filter(predicate)
dataset = dataset.cache()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
expected_output = []
for x in range(5):
r = map_function(x)
filtered = False
for predicate in predicates:
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if not self.evaluate(b):
filtered = True
break
if not filtered:
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
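# --- Illustrative sketch, not part of the original test file ---
# Filter fusion collapses a chain of `filter()` calls into one predicate.
# This hedged sketch applies two simple predicates and turns on only the
# `filter_fusion` rewrite, mirroring the parameterized test above.  It
# reuses the `dataset_ops` and `math_ops` modules imported at the top of
# this file.
def _filter_fusion_sketch():
  dataset = dataset_ops.Dataset.range(10)
  dataset = dataset.filter(lambda x: math_ops.greater(x, 2))
  dataset = dataset.filter(lambda x: math_ops.equal(x % 2, 0))
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.filter_fusion = True
  return dataset.with_options(options)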
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/filter_fusion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_ModelDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ModelDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
def testAutotuneOption(self):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.map(lambda x: x).apply(
optimization.assert_next(["Model"]))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = True
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.assertEqual(0, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
if __name__ == "__main__":
test.main()
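# --- Illustrative sketch, not part of the original test file ---
# Autotuning is enabled through options, which inserts the private
# `_ModelDataset` (a "Model" op) at the end of the pipeline, as the
# `assert_next(["Model"])` check above verifies.  A minimal opt-in
# pipeline, reusing the `dataset_ops` module imported at the top of this
# file.
def _autotune_option_sketch():
  dataset = dataset_ops.Dataset.range(10).map(lambda x: x + 1)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.autotune = True
  return dataset.with_options(options)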
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/model_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.assert_next()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class AssertNextDatasetTest(test_base.DatasetTestBase):
def testAssertNext(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
optimization.assert_next(["Map"])).map(lambda x: x)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
def testAssertNextInvalid(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
optimization.assert_next(["Whoops"])).map(lambda x: x)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
"Asserted Whoops transformation at offset 0 but encountered "
"Map transformation instead."))
def testAssertNextShort(self):
dataset = dataset_ops.Dataset.from_tensors(0).apply(
optimization.assert_next(["Map", "Whoops"])).map(lambda x: x)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_error=(
errors.InvalidArgumentError,
"Asserted next 2 transformations but encountered only 1."))
if __name__ == "__main__":
test.main()
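# --- Illustrative sketch, not part of the original test file ---
# `assert_next` is a test-only transformation: it names the ops expected
# to follow it in the optimized graph and fails at iteration time if they
# do not match.  A hedged example asserting that a `map` survives as a
# "Map" op, reusing the `dataset_ops` and `optimization` modules imported
# at the top of this file.
def _assert_next_sketch():
  dataset = dataset_ops.Dataset.range(5).apply(
      optimization.assert_next(["Map"])).map(lambda x: x * 2)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  return dataset.with_options(options)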
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/assert_next_dataset_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental._ChooseFastestBranchDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ChooseFastestBranchDatasetTest(test_base.DatasetTestBase,
parameterized.TestCase):
def testSimple(self):
dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4])
def branch(dataset):
return dataset.map(lambda x: x)
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch, branch])
self.assertDatasetProduces(
choose_fastest,
expected_output=[0, 1, 2, 3, 4],
expected_shapes=dataset_ops.get_legacy_output_shapes(dataset))
def testCaptureSimple(self):
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1])
self.assertDatasetProduces(
choose_fastest, expected_output=list(range(1, 11)))
def testDifferentFunctions(self):
dataset = dataset_ops.Dataset.range(100)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], ratio_numerator=10)
self.assertDatasetProduces(
choose_fastest,
expected_output=[list(range(10 * x, 10 * x + 10)) for x in range(10)])
def testWithRepeatBeforeAndAfter(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], ratio_numerator=10)
choose_fastest = choose_fastest.repeat(10)
self.assertDatasetProduces(
choose_fastest, expected_output=[[0] * 10 for _ in range(10)])
def testWithPrefetch(self):
"""Should maintain ordering even if the branches do prefetching."""
dataset = dataset_ops.Dataset.range(100)
def branch_0(dataset):
return dataset.prefetch(1)
def branch_1(dataset):
return dataset.prefetch(2)
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1])
self.assertDatasetProduces(choose_fastest, expected_output=list(range(100)))
def testWithMoreOutputThanInput(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=100,
num_elements_per_branch=100)
self.assertDatasetProduces(choose_fastest, expected_output=[0] * 1000)
def testWithBadNumElements(self):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
def make_dataset():
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=100,
num_elements_per_branch=10)
expected_error_msg = ("`num_elements_per_branch` must be divisible by "
"`ratio_denominator`")
if context.executing_eagerly():
with self.assertRaisesRegexp(errors.InvalidArgumentError,
expected_error_msg):
make_dataset()
else:
choose_fastest = make_dataset()
self.assertDatasetProduces(
choose_fastest,
expected_error=(errors.InvalidArgumentError, expected_error_msg))
def testErrorWithRepeat(self):
dataset = dataset_ops.Dataset.from_tensors(0)
def branch(dataset):
return dataset.repeat(10)
choose_fastest = optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=10)
self.assertDatasetProduces(
choose_fastest,
expected_error=(
errors.InvalidArgumentError,
"Cannot create more than one WrapperIterator per WrapperDataset."),
expected_error_iter=2)
if __name__ == "__main__":
test.main()
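# --- Illustrative sketch, not part of the original test file ---
# `_ChooseFastestBranchDataset` is a private API that races functionally
# equivalent branches and keeps the faster one.  This hedged sketch mirrors
# `testDifferentFunctions` above: map-then-batch and batch-then-map are
# equivalent, and `ratio_numerator=10` tells the op that one output element
# corresponds to ten input elements.  It reuses the `dataset_ops` and
# `optimization` modules imported at the top of this file.
def _choose_fastest_branch_sketch():
  dataset = dataset_ops.Dataset.range(100)
  def map_then_batch(ds):
    return ds.map(lambda x: x + 1).batch(10)
  def batch_then_map(ds):
    return ds.batch(10).map(lambda x: x + 1)
  return optimization._ChooseFastestBranchDataset(
      dataset, [map_then_batch, batch_then_map], ratio_numerator=10)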
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_branch_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapParallelization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _map_parallelization_test_cases():
"""Generates test cases for the MapParallelization optimization."""
identity = lambda x: x
increment = lambda x: x + 1
def assert_greater(x):
assert_op = control_flow_ops.Assert(math_ops.greater(x, -1), [x])
with ops.control_dependencies([assert_op]):
return x
return (("Identity", identity, True),
("Increment", increment, True),
("AssertGreater", assert_greater, True))
@test_util.run_all_in_graph_and_eager_modes
class MapParallelizationTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(*_map_parallelization_test_cases())
def testMapParallelization(self, function, should_be_parallel):
next_nodes = ["ParallelMap"] if should_be_parallel else ["Map"]
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(next_nodes)).map(function)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[function(x) for x in range(5)])
def testMapParallelizationWithCapturedConstant(self):
"""Tests that functions with captured constants are parallelized."""
captured_t = constant_op.constant(42, dtype=dtypes.int64)
def fn(x):
return x + captured_t
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["ParallelMap"])).map(fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[x + 42 for x in range(5)])
def testMapParallelizationWithCapturedVariable(self):
"""Tests that functions with captured variables are not parallelized."""
captured_t = variables.Variable(42, dtype=dtypes.int64)
def fn(x):
return x + captured_t
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map"])).map(fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_parallelization = True
dataset = dataset.with_options(options)
self.evaluate(variables.global_variables_initializer())
self.assertDatasetProduces(
dataset,
expected_output=[x + 42 for x in range(5)],
requires_initialization=True)
if __name__ == "__main__":
test.main()
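# --- Illustrative sketch, not part of the original test file ---
# Map parallelization rewrites a plain `map` into a `ParallelMap` when the
# mapped function is stateless; functions capturing a variable are left
# alone, as the tests above check.  A hedged example that opts in to only
# this rewrite, reusing the `dataset_ops` module imported at the top of
# this file.
def _map_parallelization_sketch():
  dataset = dataset_ops.Dataset.range(5).map(lambda x: x + 1)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.map_parallelization = True
  return dataset.with_options(options)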
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_parallelization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_OptimizeDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.experimental.ops import threadpool
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
def _generate_captured_refvar_test_cases():
"""Generates testcases.
Returns:
A list of tuples of (testcase_name, make_dataset_fn). make_dataset_fn takes
a tf.Variable as input and creates a test dataset that uses that variable.
"""
def make_map_dataset(var):
return dataset_ops.Dataset.from_tensors(0).map(lambda x: x + var)
def make_flat_map_dataset(var):
return dataset_ops.Dataset.from_tensors(
0).flat_map(lambda _: dataset_ops.Dataset.from_tensors(var))
def make_filter_dataset(var):
return dataset_ops.Dataset.from_tensors(0).filter(lambda x: x < var)
def make_map_and_batch_dataset(var):
def map_fn(x):
return x + var
return dataset_ops.Dataset.from_tensors(0).apply(
batching.map_and_batch(map_fn, 1))
def make_group_by_reducer_dataset(var):
reducer = grouping.Reducer(
init_func=lambda _: 0,
reduce_func=lambda x, y: x,
finalize_func=lambda _: var)
return dataset_ops.Dataset.range(5).apply(
grouping.group_by_reducer(lambda x: x % 2, reducer))
def make_group_by_window_dataset(var):
def reduce_fn(key, bucket):
del key, bucket
return dataset_ops.Dataset.from_tensors(var)
return dataset_ops.Dataset.from_tensors(0).repeat(10).apply(
grouping.group_by_window(lambda _: 0, reduce_fn, 10))
def make_scan_dataset(var):
return dataset_ops.Dataset.from_tensors(0).apply(
scan_ops.scan(
0, lambda old_state, elem: (old_state + 1, elem + old_state + var)))
return [
# Core datasets
("Map", make_map_dataset),
("FlatMap", make_flat_map_dataset),
("Filter", make_filter_dataset),
# Experimental datasets
("MapAndBatch", make_map_and_batch_dataset),
("GroupByReducer", make_group_by_reducer_dataset),
("GroupByWindow", make_group_by_window_dataset),
("Scan", make_scan_dataset)
]
@test_util.run_all_in_graph_and_eager_modes
class OptimizeDatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
def testOptimizationStatefulFunction(self):
dataset = dataset_ops.Dataset.range(
10).map(lambda _: random_ops.random_uniform([])).batch(10)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
@test_util.run_v1_only("b/123902160")
def testSkipEagerOptimizationLargeInputFromTensor(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None))
dataset = dataset_ops.Dataset.from_tensors(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([512, 1024, 1025], np.int32)})
self.evaluate(get_next)
@test_util.run_v1_only("b/123902160")
def testSkipEagerOptimizationLargeInputFromTensorSlices(self):
input_t = array_ops.placeholder(dtypes.int32, (None, None, None, None))
dataset = dataset_ops.Dataset.from_tensor_slices(input_t)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, {input_t: np.ones([1, 512, 1024, 1025], np.int32)})
self.evaluate(get_next)
def testOptimizationNestedDataset(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
def testOptimizationNestedDatasetWithModifiedRetval(self):
def flat_map_fn(_):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(optimization.assert_next(["MapAndBatch"]))
# Should be fused by map and batch fusion
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(1)
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.flat_map(flat_map_fn)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[[0]])
def testOptimizationThreadPoolDataset(self):
dataset = dataset_ops.Dataset.range(10).batch(10)
dataset = threadpool.override_threadpool(
dataset,
threadpool.PrivateThreadPool(
2, display_name="private_thread_pool_%d" % 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(10))],
requires_initialization=True)
def testOptimizationNonSerializable(self):
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(optimization.assert_next(["FiniteSkip"]))
dataset = dataset.skip(0) # Should not be removed by noop elimination
dataset = dataset.apply(optimization.non_serializable())
dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
dataset = dataset.skip(0) # Should be removed by noop elimination
dataset = dataset.cache()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
def testOptimizationNonSerializableAsDirectInput(self):
"""Tests that non-serializable dataset can be OptimizeDataset's input."""
dataset = dataset_ops.Dataset.from_tensors(0)
dataset = dataset.apply(optimization.non_serializable())
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=[0])
@parameterized.named_parameters(_generate_captured_refvar_test_cases())
@test_util.run_v1_only("RefVariables are not supported in eager mode.")
def testSkipEagerOptimizationWithCapturedRefVar(self, dataset_fn):
"""Tests that default optimizations are disabled with ref variables."""
variable = variable_scope.get_variable(
"v", initializer=0, use_resource=False)
assign_op = variable.assign_add(1)
# Check that warning is logged.
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
unoptimized_dataset = dataset_fn(variable)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.map_and_batch_fusion = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_it = dataset_ops.make_initializable_iterator(optimized_dataset)
self.assertGreaterEqual(len(w), 1)
expected = ("tf.data static optimizations are not compatible with "
"tf.Variable. The following optimizations will be disabled: %s."
" To enable optimizations, use resource variables instead by "
"calling `tf.enable_resource_variables()` at the start of the "
"program." % (", ".join(options._static_optimizations())))
self.assertTrue(any([expected in str(warning) for warning in w]))
# Check that outputs are the same in the optimized and unoptimized cases,
# when the variable value is changing.
unoptimized_it = dataset_ops.make_initializable_iterator(
unoptimized_dataset)
with ops.control_dependencies([assign_op]):
unoptimized_output = unoptimized_it.get_next()
optimized_output = optimized_it.get_next()
self.evaluate(variable.initializer)
self.evaluate((unoptimized_it.initializer, optimized_it.initializer))
while True:
try:
unoptimized, optimized = self.evaluate((unoptimized_output,
optimized_output))
self.assertEqual(unoptimized, optimized)
except errors.OutOfRangeError:
break
def testOptimizationEnabledByDefault(self):
"""Tests that some optimizations are applied to datasets by default."""
options = dataset_ops.Options()
expected_optimizations = [
"map_and_batch_fusion",
"noop_elimination",
"shuffle_and_repeat_fusion",
]
self.assertEqual(
set(options._static_optimizations()), set(expected_optimizations))
def testOptimizationDisableDefault(self):
"""Tests that we can disable all static optimizations enabled by default.
If the `apply_default_optimizations` optimization options flag is False,
only explicitly enabled optimizations will be applied.
"""
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.hoist_random_uniform = True
options.experimental_optimization.noop_elimination = True
expected_optimizations = [
"hoist_random_uniform",
"noop_elimination",
]
self.assertEqual(
set(options._static_optimizations()), set(expected_optimizations))
if __name__ == "__main__":
test.main()
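# --- Illustrative sketch, not part of the original test file ---
# Summarizing the options behaviour tested above: a few static rewrites are
# applied by default, and setting `apply_default_optimizations = False`
# limits the rewrites to those explicitly switched on.  This hedged sketch
# reuses the `dataset_ops` module imported at the top of this file.
def _explicit_optimizations_sketch():
  dataset = dataset_ops.Dataset.range(10).skip(0).map(lambda x: x).batch(2)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  # Remove the no-op skip(0) and fuse the map+batch pair, respectively.
  options.experimental_optimization.noop_elimination = True
  options.experimental_optimization.map_and_batch_fusion = True
  return dataset.with_options(options)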
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/optimize_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
def _map_fusion_test_cases():
"""Generates test cases for the MapFusion optimization."""
identity = lambda x: x
increment = lambda x: x + 1
def increment_and_square(x):
y = x + 1
return y * y
functions = [identity, increment, increment_and_square]
tests = []
for i, fun1 in enumerate(functions):
for j, fun2 in enumerate(functions):
tests.append((
"Test{}{}".format(i, j),
[fun1, fun2],
))
for k, fun3 in enumerate(functions):
tests.append((
"Test{}{}{}".format(i, j, k),
[fun1, fun2, fun3],
))
swap = lambda x, n: (n, x)
tests.append((
"Swap1",
[lambda x: (x, 42), swap],
))
tests.append((
"Swap2",
[lambda x: (x, 42), swap, swap],
))
return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class MapFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
@parameterized.named_parameters(*_map_fusion_test_cases())
def testMapFusion(self, functions):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "MemoryCacheImpl"]))
for function in functions:
dataset = dataset.map(function)
dataset = dataset.cache()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
expected_output = []
for x in range(5):
r = x
for function in functions:
if isinstance(r, tuple):
r = function(*r) # Pass tuple as multiple arguments.
else:
r = function(r)
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
if __name__ == "__main__":
test.main()
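# --- Illustrative sketch, not part of the original test file ---
# Map fusion chains consecutive `map` calls into a single function.  This
# hedged sketch composes two of the functions used in the parameterized
# cases above and enables only the `map_fusion` rewrite, reusing the
# `dataset_ops` module imported at the top of this file.
def _map_fusion_sketch():
  increment = lambda x: x + 1
  def increment_and_square(x):
    y = x + 1
    return y * y
  dataset = dataset_ops.Dataset.range(5).map(increment).map(
      increment_and_square)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.map_fusion = True
  return dataset.with_options(options)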
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_fusion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `LatencyAllEdges` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase):
def testLatencyStatsOptimization(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.from_tensors(1).apply(
optimization.assert_next(
["LatencyStats", "Map", "LatencyStats", "Prefetch",
"LatencyStats"])).map(lambda x: x * x).prefetch(1)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_stats.latency_all_edges = True
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_output=[1],
requires_initialization=True,
num_test_iterations=1)
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::TensorDataset"), 1)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::MapDataset"), 1)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::PrefetchDataset"), 1)
if __name__ == "__main__":
test.main()
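# --- Illustrative sketch, not part of the original test file ---
# `latency_all_edges` inserts LatencyStats nodes between transformations
# and reports them through a StatsAggregator.  A minimal opt-in pipeline,
# reusing the `dataset_ops` and `stats_aggregator` modules imported at the
# top of this file.
def _latency_all_edges_sketch():
  aggregator = stats_aggregator.StatsAggregator()
  dataset = dataset_ops.Dataset.from_tensors(1).map(
      lambda x: x * x).prefetch(1)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_stats.latency_all_edges = True
  options.experimental_stats.aggregator = aggregator
  return dataset.with_options(options)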
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `HoistRandomUniform` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
def _hoist_random_uniform_test_cases():
"""Generates test cases for the HoistRandomUniform optimization."""
plus_one = lambda x: x + 1
def random(_):
return random_ops.random_uniform([],
minval=1,
maxval=10,
dtype=dtypes.float32,
seed=42)
def random_with_assert(x):
y = random(x)
assert_op = control_flow_ops.Assert(math_ops.greater_equal(y, 1), [y])
with ops.control_dependencies([assert_op]):
return y
twice_random = lambda x: (random(x) + random(x)) / 2.
tests = [("PlusOne", plus_one, False), ("RandomUniform", random, True),
("RandomWithAssert", random_with_assert, True),
("TwiceRandom", twice_random, False)]
return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class HoistRandomUniformTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testDataset(self, dataset):
previous_result = 0
get_next = self.getNext(dataset)
for _ in range(5):
result = self.evaluate(get_next())
self.assertLessEqual(1, result)
self.assertLessEqual(result, 10)
      # Check that the result appears random by verifying that consecutive
      # values differ.
self.assertNotEqual(previous_result, result)
previous_result = result
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@parameterized.named_parameters(*_hoist_random_uniform_test_cases())
def testHoisting(self, function, will_optimize):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(
["Zip[0]", "Map"] if will_optimize else ["Map"])).map(function)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.hoist_random_uniform = True
dataset = dataset.with_options(options)
self._testDataset(dataset)
def testCapturedInputs(self):
a = constant_op.constant(1, dtype=dtypes.float32)
b = constant_op.constant(0, dtype=dtypes.float32)
some_tensor = math_ops.mul(a, b)
def random_with_capture(_):
return some_tensor + random_ops.random_uniform(
[], minval=1, maxval=10, dtype=dtypes.float32, seed=42)
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Zip[0]", "Map"])).map(random_with_capture)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.hoist_random_uniform = True
dataset = dataset.with_options(options)
self._testDataset(dataset)
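# A minimal sketch of the pattern the tests above target: a map function
# containing a stateless `random_uniform` plus Options with
# `hoist_random_uniform` enabled, so the random op can be hoisted out of the
# per-element function. The illustrative helper reuses this module's imports.
def _example_enable_hoist_random_uniform():
  dataset = dataset_ops.Dataset.range(5).map(
      lambda _: random_ops.random_uniform([], minval=1, maxval=10, seed=42))
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.hoist_random_uniform = True
  return dataset.with_options(options)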
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/hoist_random_uniform_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `FilterWithRandomUniformFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class FilterWithRandomUniformFusionTest(test_base.DatasetTestBase):
def testFilterWithRandomUniformFusion(self):
dataset = dataset_ops.Dataset.range(10000000).apply(
optimization.assert_next(["Sampling"]))
dataset = dataset.filter(lambda _: random_ops.random_uniform([]) < 0.05)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_with_random_uniform_fusion = True
dataset = dataset.with_options(options)
get_next = self.getNext(dataset)
self.evaluate(get_next())
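# A minimal sketch of the pattern the optimization targets: a `filter` whose
# predicate compares `random_uniform([])` against a constant, with
# `filter_with_random_uniform_fusion` enabled. The illustrative helper reuses
# this module's imports.
def _example_enable_filter_with_random_uniform_fusion():
  dataset = dataset_ops.Dataset.range(100).filter(
      lambda _: random_ops.random_uniform([]) < 0.1)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.filter_with_random_uniform_fusion = True
  return dataset.with_options(options)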
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/filter_with_random_uniform_fusion_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental._ChooseFastestDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ChooseFastestDatasetTest(test_base.DatasetTestBase,
parameterized.TestCase):
def testChooseFastestSimple(self):
dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4])
merge = optimization._ChooseFastestDataset([dataset, dataset])
self.assertDatasetProduces(
merge,
expected_output=[0, 1, 2, 3, 4],
expected_shapes=dataset_ops.get_legacy_output_shapes(dataset))
def testChooseFastestManyInputs(self):
dataset = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3, 4])
merge = optimization._ChooseFastestDataset([dataset for _ in range(5)])
self.assertDatasetProduces(
merge,
expected_output=[0, 1, 2, 3, 4],
expected_shapes=dataset_ops.get_legacy_output_shapes(dataset))
def testChooseFastest(self):
dataset = dataset_ops.Dataset.range(600)
f = lambda x: 2 * x
dataset_a = dataset.batch(50).map(f)
dataset_b = dataset.map(f).batch(50)
merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
self.assertDatasetProduces(
merge,
expected_output=[
[i * 2 for i in range(j * 50, (j + 1) * 50)] for j in range(12)
],
expected_shapes=dataset_ops.get_legacy_output_shapes(dataset_a))
@parameterized.named_parameters(
("Shapes", [0], [[1, 2, 3]], "must have compatible output shapes."),
("Types", [0], [0.0], "must have the same output types."),
("NumComponents", [0], ([0], [1]), "must have the same output types."),
("Cardinality", [1, 2, 3], [1], "must have compatible cardinalities."))
def testChooseFastestErrorWithIncompatibleInput(self, slices_a, slices_b,
error_msg):
dataset_a = dataset_ops.Dataset.from_tensor_slices(slices_a)
dataset_b = dataset_ops.Dataset.from_tensor_slices(slices_b)
# The error is raised at dataset creation time.
if context.executing_eagerly():
with self.assertRaises(errors.InvalidArgumentError):
merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
else:
merge = optimization._ChooseFastestDataset([dataset_a, dataset_b])
self.assertDatasetProduces(
merge, expected_error=(errors.InvalidArgumentError, error_msg))
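# A minimal sketch of `_ChooseFastestDataset` usage as exercised above: two
# pipelines that produce identical elements (map-then-batch vs. batch-then-map)
# are handed to the experimental op, which picks the faster branch at runtime.
# The illustrative helper reuses this module's imports; note the op is private
# and experimental.
def _example_choose_fastest():
  dataset = dataset_ops.Dataset.range(100)
  double = lambda x: 2 * x
  dataset_a = dataset.batch(10).map(double)
  dataset_b = dataset.map(double).batch(10)
  return optimization._ChooseFastestDataset([dataset_a, dataset_b])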
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/choose_fastest_dataset_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndBatchFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class MapAndBatchFusionTest(test_base.DatasetTestBase):
def testMapAndBatchFusion(self):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["MapAndBatch"])).map(lambda x: x * x).batch(10)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset, expected_output=[[x * x for x in range(10)]])
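# A minimal sketch of the user-facing pattern: `map` directly followed by
# `batch`, with `map_and_batch_fusion` enabled so the two nodes can be fused
# into a single MapAndBatch node. The illustrative helper reuses this module's
# imports.
def _example_enable_map_and_batch_fusion():
  dataset = dataset_ops.Dataset.range(10).map(lambda x: x * x).batch(10)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.map_and_batch_fusion = True
  return dataset.with_options(options)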
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_and_batch_fusion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `NoopElimination` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class NoopEliminationTest(test_base.DatasetTestBase):
def testNoopElimination(self):
a = constant_op.constant(1, dtype=dtypes.int64)
b = constant_op.constant(2, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
dataset = dataset_ops.Dataset.range(5)
dataset = dataset.apply(
optimization.assert_next(
["FiniteRepeat", "FiniteSkip", "Prefetch", "MemoryCacheImpl"]))
dataset = dataset.repeat(some_tensor).skip(5).take(-1).skip(0).repeat(
1).prefetch(0).prefetch(1).cache()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.noop_elimination = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, expected_output=range(5))
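# A minimal sketch of the kind of pipeline the optimization simplifies:
# `take(-1)`, `skip(0)`, `repeat(1)` and `prefetch(0)` are no-ops that can be
# removed when `noop_elimination` is enabled. The illustrative helper reuses
# this module's imports.
def _example_enable_noop_elimination():
  dataset = dataset_ops.Dataset.range(5).take(-1).skip(0).repeat(1).prefetch(0)
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.noop_elimination = True
  return dataset.with_options(options)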
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/noop_elimination_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapAndFilterFusion` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
def _map_and_filter_fusion_test_cases():
"""Generates test cases for the MapAndFilterFusion optimization."""
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
  is_even = lambda x: math_ops.equal(x % 2, 0)
  greater = lambda x: math_ops.greater(x + 5, 0)
  functions = [identity, increment, minus_five, increment_and_square]
  filters = [take_all, is_zero, is_even, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
  # Multi-output test cases.
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
@test_util.run_all_in_graph_and_eager_modes
class MapAndFilterFusionTest(test_base.DatasetTestBase, parameterized.TestCase):
def _testMapAndFilter(self, dataset, function, predicate):
expected_output = []
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if self.evaluate(b):
expected_output.append(r)
self.assertDatasetProduces(dataset, expected_output=expected_output)
@parameterized.named_parameters(*_map_and_filter_fusion_test_cases())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map", "Filter",
"Map"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
def testCapturedInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
    # The optimization does not currently support functions with captured
    # inputs, so the Map and Filter nodes are expected to remain unfused.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(["Map",
"Filter"])).map(function).filter(predicate)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
self._testMapAndFilter(dataset, function, predicate)
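# A minimal sketch of the user-facing pattern: `map` directly followed by
# `filter`, with `map_and_filter_fusion` enabled so the two functions can be
# fused. The illustrative helper reuses this module's imports.
def _example_enable_map_and_filter_fusion():
  dataset = dataset_ops.Dataset.range(10).map(lambda x: x + 1).filter(
      lambda x: math_ops.equal(x % 2, 0))
  options = dataset_ops.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.map_and_filter_fusion = True
  return dataset.with_options(options)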
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_and_filter_fusion_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def _generate_unary_cwise_math_cases():
# TODO(rachelim): Consolidate tests with pfor when APIs are somewhat shared.
bitwise_cases = [("Invert", bitwise_ops.invert)]
logical_cases = [("LogicalNot", math_ops.logical_not)]
complex_cases = [
("Angle", math_ops.angle),
("ComplexAbs", math_ops.abs),
("Conj", math_ops.conj),
("Imag", math_ops.imag),
("Real", math_ops.real),
]
real_cases = [
("Abs", math_ops.abs),
("Acos", math_ops.acos),
("Acosh", lambda x: math_ops.acosh(1 + math_ops.square(x))),
("Asin", math_ops.asin),
("Asinh", math_ops.asinh),
("Atan", math_ops.atan),
("Atanh", math_ops.atanh),
("BesselI0e", math_ops.bessel_i0e),
("BesselI1e", math_ops.bessel_i1e),
("Ceil", math_ops.ceil),
("Cos", math_ops.cos),
("Cosh", math_ops.cosh),
("Digamma", math_ops.digamma),
("Elu", nn.elu),
("Erf", math_ops.erf),
("Erfc", math_ops.erfc),
("Exp", math_ops.exp),
("Expm1", math_ops.expm1),
("Floor", math_ops.floor),
("Inv", math_ops.inv),
("IsFinite", math_ops.is_finite),
("IsInf", math_ops.is_inf),
("Lgamma", math_ops.lgamma),
("Log", math_ops.log),
("Log1p", math_ops.log1p),
("Neg", math_ops.negative),
("Reciprocal", math_ops.reciprocal),
("Relu", nn.relu),
("Relu6", nn.relu6),
("Rint", math_ops.rint),
("Round", math_ops.round),
("Rsqrt", math_ops.rsqrt),
("Selu", nn.selu),
("Sigmoid", math_ops.sigmoid),
("Sign", math_ops.sign),
("Sin", math_ops.sin),
("Sinh", math_ops.sinh),
("Softplus", nn.softplus),
("Softsign", nn.softsign),
("Sqrt", math_ops.sqrt),
("Square", math_ops.square),
("Tan", math_ops.tan),
("Tanh", math_ops.tanh),
]
random_input = np.random.rand(3, 5)
complex_component = np.random.rand(3, 5)
random_int = np.random.randint(0, 10, (7, 3, 5))
def bitwise_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_int)
def logical_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_input > 0)
def random_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(random_input)
def complex_dataset_factory():
return dataset_ops.Dataset.from_tensor_slices(
math_ops.complex(random_input, complex_component))
case_factory_pairs = [
(bitwise_cases, bitwise_dataset_factory),
(logical_cases, logical_dataset_factory),
(complex_cases, complex_dataset_factory),
(real_cases, random_dataset_factory),
]
return [(case[0], case[1], factory)
for cases, factory in case_factory_pairs
for case in cases]
def _generate_binary_cwise_math_cases():
bitwise_cases = [("BitwiseAnd", bitwise_ops.bitwise_and),
("BitwiseOr", bitwise_ops.bitwise_or),
("BitwiseXor", bitwise_ops.bitwise_xor),
("LeftShift", bitwise_ops.left_shift),
("RightShift", bitwise_ops.right_shift)]
logical_cases = [("LogicalAnd", math_ops.logical_and),
("LogicalOr", math_ops.logical_or)]
# Wrapper functions restricting the range of inputs of zeta and polygamma.
def safe_polygamma(x, y):
return math_ops.polygamma(
math_ops.round(clip_ops.clip_by_value(y, 1, 10)), x * x + 1)
def safe_zeta(x, y):
return math_ops.zeta(x * x + 1, y * y)
real_cases = [
("Add", math_ops.add),
("AddV2", math_ops.add_v2),
("Atan2", math_ops.atan2),
("Complex", math_ops.complex),
("DivNoNan", math_ops.div_no_nan),
("Equal", math_ops.equal),
("FloorDiv", math_ops.floor_div),
("FloorMod", math_ops.floor_mod),
("Greater", math_ops.greater),
("GreaterEqual", math_ops.greater_equal),
("Igamma", math_ops.igamma),
("Igammac", math_ops.igammac),
("IgammaGradA", math_ops.igamma_grad_a),
("Less", math_ops.less),
("LessEqual", math_ops.less_equal),
("Maximum", math_ops.maximum),
("Minimum", math_ops.minimum),
("Mod", math_ops.mod),
("Mul", math_ops.multiply),
("NotEqual", math_ops.not_equal),
("Polygamma", safe_polygamma),
("Pow", math_ops.pow),
("RealDiv", math_ops.divide),
("SquareDifference", math_ops.squared_difference),
("Sub", math_ops.subtract),
("TruncateMod", math_ops.truncate_mod),
("Zeta", safe_zeta),
]
# Exercises broadcasting capabilities
x = np.random.rand(7, 3, 5)
y = np.random.rand(3, 5)
x_int = np.random.randint(0, 10, (7, 3, 5))
y_int = np.random.randint(0, 10, (3, 5))
def bitwise_dataset_factory():
return dataset_ops.Dataset.from_tensors((x_int, y_int))
def logical_dataset_factory():
return dataset_ops.Dataset.from_tensors((x > 0, y > 0))
def random_dataset_factory():
return dataset_ops.Dataset.from_tensors((x, y))
case_factory_pairs = [
(bitwise_cases, bitwise_dataset_factory),
(logical_cases, logical_dataset_factory),
(real_cases, random_dataset_factory),
]
return [(case[0], case[1], factory)
for cases, factory in case_factory_pairs
for case in cases]
def _generate_cwise_test_cases():
return _generate_unary_cwise_math_cases() + _generate_binary_cwise_math_cases(
)
def _generate_csv_test_case():
def csv_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
return decode_csv_fn, csv_factory
def _generate_parse_single_example_test_case():
# When sparse tensors are used, map_vectorization is not
# attempted because the output_shapes of the map dataset are not defined.
# TODO(rachelim): Consider being more lax with checking the output_shapes of
# the map node.
def parse_example_factory():
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
})).SerializeToString() for i in range(10)
]))
def parse_single_example_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
}
return parsing_ops.parse_single_example(x, features)
return parse_single_example_fn, parse_example_factory
def _generate_optimization_test_cases():
def base_dataset_factory():
return dataset_ops.Dataset.from_tensors(np.random.rand(10, 3)).repeat(5)
rand_val = np.random.rand(1, 1, 1, 1, 1, 1)
csv_test_case = _generate_csv_test_case()
parse_fn, parse_base = _generate_parse_single_example_test_case()
def dense_output_only_parse_fn(x):
# Since we haven't implemented a vectorizer for SerializeSparse, any
# function with sparse outputs will only be naively vectorized.
parse_result = parse_fn(x)
return [
y for y in parse_result if not isinstance(y, sparse_tensor.SparseTensor)
]
def map_fn_with_cycle(x):
c = lambda i: math_ops.less(i, 10)
b = lambda i: math_ops.add(i, 1)
return control_flow_ops.while_loop(c, b, [x])
# Misc test cases
test_cases = [
("Basic", lambda x: (x, x + 1), base_dataset_factory),
("Broadcast", lambda x: x + rand_val, base_dataset_factory),
("Cycle", map_fn_with_cycle, lambda: dataset_ops.Dataset.from_tensors(1)),
("Const", lambda x: 2, base_dataset_factory),
("Cast", lambda x: math_ops.cast(x, dtypes.float64),
base_dataset_factory),
("Reshape", lambda x: array_ops.reshape(x, (-1, 30)),
base_dataset_factory),
("Transpose", array_ops.transpose, base_dataset_factory),
("Unpack", array_ops.unstack, base_dataset_factory),
("UnpackNegativeAxis", lambda x: array_ops.unstack(x, axis=-1),
base_dataset_factory),
# Parsing ops
("DecodeCSV", csv_test_case[0], csv_test_case[1]),
("ParseSingleExample", parse_fn, parse_base),
("ParseSingleExampleDenseOutputOnly", dense_output_only_parse_fn,
parse_base),
] + _generate_cwise_test_cases()
return [{
"testcase_name":
x[0] + "Parallel" if num_parallel_calls is not None else x[0],
"map_fn":
x[1],
"base_dataset_factory":
x[2],
"num_parallel_calls":
num_parallel_calls
} for x in test_cases for num_parallel_calls in (None, 12)]
@test_util.run_all_in_graph_and_eager_modes
class MapVectorizationTest(test_base.DatasetTestBase, parameterized.TestCase):
def _enable_map_vectorization(self, dataset, use_choose=True):
options = dataset_ops.Options()
opt_options = options.experimental_optimization
opt_options.map_vectorization.enabled = True
opt_options.map_vectorization.use_choose_fastest = use_choose
return dataset.with_options(options)
def _get_test_datasets(self,
base_dataset,
map_fn,
num_parallel_calls=None,
expect_optimized=True):
"""Given base dataset and map fn, creates test datasets.
Returns a tuple of (unoptimized dataset, optimized dataset). The
unoptimized dataset has the assertion that Batch follows Map. The optimized
dataset has the assertion that Map follows Batch, and has the
"map_vectorization" optimization applied.
Args:
base_dataset: Input dataset to map->batch
map_fn: Map function to use
num_parallel_calls: (Optional.) num_parallel_calls argument for map
expect_optimized: (Optional.) Whether we expect the optimization to take
place, in which case we will assert that Batch is followed by Map,
otherwise Map followed by Batch. Defaults to True.
Returns:
Tuple of (unoptimized dataset, optimized dataset).
"""
map_node_name = "Map" if num_parallel_calls is None else "ParallelMap"
def _make_dataset(node_names):
dataset = base_dataset.apply(optimization.assert_next(node_names))
dataset = dataset.map(map_fn, num_parallel_calls)
dataset = dataset.batch(100)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = False
dataset = dataset.with_options(options)
return dataset
unoptimized = _make_dataset([map_node_name, "Batch"])
# Note that because of the `ChooseDataset` fork, we can't use `assert_next`
# to verify the optimization result.
optimized = _make_dataset(["ChooseFastestBranch"]
if expect_optimized else [map_node_name, "Batch"])
optimized = self._enable_map_vectorization(optimized)
return unoptimized, optimized
@parameterized.named_parameters(_generate_optimization_test_cases())
def testOptimization(self, map_fn, base_dataset_factory, num_parallel_calls):
base_dataset = base_dataset_factory()
unoptimized, optimized = self._get_test_datasets(base_dataset, map_fn,
num_parallel_calls)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationBadMapFn(self):
    # Test that an error raised inside the map function is propagated.
    def map_fn(x):
      # x has leading dimension 5, so gathering index 10 raises an error.
return array_ops.gather(x, 10)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
base_dataset = dataset_ops.Dataset.range(5).repeat(5).batch(
5, drop_remainder=True)
_, optimized = self._get_test_datasets(base_dataset, map_fn)
nxt = dataset_ops.make_one_shot_iterator(optimized).get_next()
self.evaluate(nxt)
def testOptimizationWithCapturedInputs(self):
# Tests that vectorization works with captured inputs.
y = constant_op.constant(1, shape=(2,))
z = constant_op.constant(2, shape=(2,))
def map_fn(x):
return x, y, z
base_dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2],
[3, 4]]).repeat(5)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=True)
self.assertDatasetsEqual(optimized, unoptimized)
def testOptimizationWithMapAndBatchFusion(self):
# Tests that vectorization works on fused map and batch.
def map_fn(x):
return x**2
base_dataset = dataset_ops.Dataset.range(1000)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
base_dataset = base_dataset.with_options(options)
def _make_dataset(node_names):
dataset = base_dataset.apply(optimization.assert_next(node_names))
dataset = dataset.apply(batching.map_and_batch(map_fn, 100))
return dataset
unoptimized = _make_dataset(["MapAndBatch"])
optimized = _make_dataset(["ChooseFastestBranch"])
optimized = self._enable_map_vectorization(optimized)
self.assertDatasetsEqual(optimized, unoptimized)
@parameterized.named_parameters(
("1", True, True),
("2", True, False),
("3", False, True),
("4", False, False),
)
def testOptimizationWithChainedMapAndBatch(self, fuse_first, fuse_second):
# Tests that vectorization works on chained map and batch functions.
def map_fn(x):
return x * 2
unoptimized_seq = []
def make_apply_fn(is_fused):
if is_fused:
unoptimized_seq.append("MapAndBatch")
def apply_fn(dataset):
return dataset.apply(
batching.map_and_batch(map_fn, 2, 12, drop_remainder=True))
return apply_fn
else:
unoptimized_seq.extend(["ParallelMap", "Batch"])
def apply_fn(dataset):
return dataset.map(map_fn, 12).batch(2, drop_remainder=True)
return apply_fn
base_dataset = dataset_ops.Dataset.range(1000)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
base_dataset = base_dataset.with_options(options)
apply_fn_1 = make_apply_fn(fuse_first)
apply_fn_2 = make_apply_fn(fuse_second)
def make_dataset(node_names):
dataset = base_dataset.apply(optimization.assert_next(node_names))
dataset = apply_fn_1(dataset)
dataset = apply_fn_2(dataset)
return dataset
unoptimized = make_dataset(unoptimized_seq)
optimized = make_dataset(["ChooseFastestBranch", "ChooseFastestBranch"])
optimized = self._enable_map_vectorization(optimized)
self.assertDatasetsEqual(optimized, unoptimized)
def testOptimizationIgnoreStateful(self):
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, np.int64(0))]):
return array_ops.identity(x)
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(map_fn)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
def testOptimizationIgnoreRagged(self):
# Make sure we ignore inputs that might not be uniformly sized
def map_fn(x):
return array_ops.gather(x, np.int64(0))
# output_shape = (?,)
base_dataset = dataset_ops.Dataset.range(20).batch(3, drop_remainder=False)
unoptimized, optimized = self._get_test_datasets(
base_dataset, map_fn, expect_optimized=False)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationIgnoreRaggedMap(self):
    # Don't optimize when the shapes of the map fn outputs are unknown.
def map_fn(x):
return array_ops.tile(x, x)
dataset = dataset_ops.Dataset.range(10).batch(1)
dataset = dataset.map(map_fn)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
with self.assertRaises(errors.InvalidArgumentError):
get_next = self.getNext(dataset)
self.evaluate(get_next())
def testOptimizationWithUnknownBatchShape(self):
tensor = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Datasets with sparse tensors have unknown output shapes.
base_dataset = dataset_ops.Dataset.from_tensors(tensor)
unoptimized = base_dataset.apply(batching.map_and_batch(lambda x: x, 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized = unoptimized.with_options(options)
optimized = self._enable_map_vectorization(unoptimized)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationWithSparseTensor(self):
base_dataset = dataset_ops.Dataset.from_tensors(0)
def map_fn(x):
del x
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Datasets with sparse tensors have unknown output shapes.
unoptimized = base_dataset.apply(batching.map_and_batch(map_fn, 2))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized = unoptimized.with_options(options)
optimized = self._enable_map_vectorization(unoptimized)
self.assertDatasetsEqual(unoptimized, optimized)
def testOptimizationWithPrefetch(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(lambda x: x)
dataset = dataset.prefetch(1)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset)
self.assertDatasetProduces(dataset, [list(range(10))])
def testOptimizationWithoutChooseFastest(self):
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.map(lambda x: x**2)
dataset = dataset.batch(10)
dataset = self._enable_map_vectorization(dataset, use_choose=False)
self.assertDatasetProduces(dataset, [[x**2 for x in range(10)]])
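# A minimal sketch of the user-facing pattern: a per-element `map` followed by
# `batch`, with `map_vectorization` enabled (mirroring
# `_enable_map_vectorization` above) so the map can be rewritten to operate on
# whole batches. The illustrative helper reuses this module's imports.
def _example_enable_map_vectorization():
  dataset = dataset_ops.Dataset.range(100).map(lambda x: x * 2).batch(10)
  options = dataset_ops.Options()
  options.experimental_optimization.map_vectorization.enabled = True
  return dataset.with_options(options)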
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/optimization/map_vectorization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the OptimizeDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class OptimizeDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_dataset(num_elements, batch_size):
return dataset_ops.Dataset.range(num_elements).map(lambda x: x * x).batch(
batch_size).apply(optimization.optimize(["map_and_batch_fusion"]))
self.run_core_tests(lambda: build_dataset(200, 10), None, 20)
def testWithNewFunction(self):
"""Tests that optimized datasets with new functions work."""
def build_dataset():
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.map(lambda x: x)
dataset = dataset.batch(5)
# map_vectorization adds a new vectorized function to the function
# library.
dataset = dataset.apply(optimization.optimize(["map_vectorization"]))
return dataset
self.run_core_tests(build_dataset, None, 20)
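# A minimal sketch of applying a specific rewrite by name with
# `optimization.optimize`, as the tests above do. The illustrative helper
# reuses this module's imports.
def _example_apply_named_optimization():
  dataset = dataset_ops.Dataset.range(100).map(lambda x: x * x).batch(10)
  return dataset.apply(optimization.optimize(["map_and_batch_fusion"]))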
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/optimize_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the UniqueDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class UniqueDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testUnique(self):
def build_dataset(num_elements, unique_elem_range):
return dataset_ops.Dataset.range(num_elements).map(
lambda x: x % unique_elem_range).apply(unique.unique())
self.run_core_tests(lambda: build_dataset(200, 100),
lambda: build_dataset(40, 100), 100)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/unique_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the dataset constructors serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.platform import test
class FromTensorsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_tensor_dataset(self, variable_array):
components = (variable_array, np.array([1, 2, 3]), np.array(37.0))
return dataset_ops.Dataset.from_tensors(components)
def testFromTensorsCore(self):
# Equal length components
arr = np.array(1)
num_outputs = 1
diff_arr = np.array(2)
self.run_core_tests(lambda: self._build_tensor_dataset(arr),
lambda: self._build_tensor_dataset(diff_arr),
num_outputs)
class FromTensorSlicesSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_tensor_slices_dataset(self, components):
return dataset_ops.Dataset.from_tensor_slices(components)
def testFromTensorSlicesCore(self):
# Equal length components
components = (np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0]))
diff_comp = (np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[5], [6], [7], [8]]), 22),
np.array([1.0, 2.0, 3.0, 4.0]))
dict_components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
self.run_core_tests(lambda: self._build_tensor_slices_dataset(components),
lambda: self._build_tensor_slices_dataset(diff_comp), 4)
self.run_core_tests(
lambda: self._build_tensor_slices_dataset(dict_components), None, 3)
class FromSparseTensorSlicesSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_sparse_tensor_slice_dataset(self, slices):
indices = np.array(
[[i, j] for i in range(len(slices)) for j in range(len(slices[i]))],
dtype=np.int64)
values = np.array([val for s in slices for val in s], dtype=np.float64)
dense_shape = np.array(
[len(slices), max(len(s) for s in slices) + 1], dtype=np.int64)
sparse_components = sparse_tensor.SparseTensor(indices, values, dense_shape)
return dataset_ops.Dataset.from_sparse_tensor_slices(sparse_components)
def testFromSparseTensorSlicesCore(self):
slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]
diff_slices = [[1., 2.], [2.], [2., 3., 4.], [], [], []]
self.run_core_tests(
lambda: self._build_sparse_tensor_slice_dataset(slices),
lambda: self._build_sparse_tensor_slice_dataset(diff_slices),
9,
sparse_tensors=True)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_constructor_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GroupByReducer serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class GroupByReducerSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, components):
reducer = grouping.Reducer(
init_func=lambda _: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
return dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_reducer(lambda x: x % 5, reducer))
def testCoreGroupByReducer(self):
components = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
self.verify_unused_iterator(
lambda: self._build_dataset(components), 5, verify_exhausted=True)
self.verify_init_before_restore(
lambda: self._build_dataset(components), 5, verify_exhausted=True)
self.verify_multiple_breaks(
lambda: self._build_dataset(components), 5, verify_exhausted=True)
self.verify_reset_restored_iterator(
lambda: self._build_dataset(components), 5, verify_exhausted=True)
self.verify_restore_in_empty_graph(
lambda: self._build_dataset(components), 5, verify_exhausted=True)
diff_components = np.array([5, 4, 3, 2, 1, 0], dtype=np.int64)
self.verify_restore_in_modified_graph(
lambda: self._build_dataset(components),
lambda: self._build_dataset(diff_components),
5,
verify_exhausted=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/group_by_reducer_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParseExampleDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.platform import test
class ParseExampleDatasetSerializationTest(
reader_dataset_ops_test_base.MakeBatchedFeaturesDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def ParseExampleDataset(self, num_repeat, batch_size):
return self.make_batch_feature(
filenames=self.test_filenames,
num_epochs=num_repeat,
batch_size=batch_size,
reader_num_threads=5,
parser_num_threads=10)
def testSerializationCore(self):
num_repeat = 5
batch_size = 2
num_outputs = self._num_records * self._num_files * num_repeat // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self.ParseExampleDataset(
num_repeat=num_repeat, batch_size=batch_size),
lambda: self.ParseExampleDataset(num_repeat=10, batch_size=4),
num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/parse_example_dataset_serialization_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TakeWhileDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class TakeWhileDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_dataset(self, num_elements, upper_bound):
return dataset_ops.Dataset.range(num_elements).apply(
take_while_ops.take_while(lambda x: x < upper_bound))
@parameterized.parameters((23, 10, 7), (10, 50, 0), (25, 30, 25))
def testCore(self, num_elem1, num_elem2, upper_bound):
self.run_core_tests(lambda: self._build_dataset(num_elem1, upper_bound),
lambda: self._build_dataset(num_elem2, upper_bound),
upper_bound)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/take_while_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._tensor_slice_len = 7
self._num_epochs = 14
self._num_outputs = self._tensor_slice_len * self._num_epochs
def _build_ds(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(self._num_epochs))
def testSaveRestoreCore(self):
self.run_core_tests(
self._build_ds,
lambda: self._build_ds(multiplier=15.0),
self._num_outputs)
def testSaveStatefulFunction(self):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(_map_fn)
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureVariableInMapFn(self):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1)))
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureConstantInMapFn(self):
def _build_ds():
constant_var = constant_op.constant(5)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda x: x + constant_var))
self.run_core_tests(_build_ds, None, 10)
def testCaptureDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
def testBuildDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return constant_op.constant(11000) + defun_fn_deep(
math_ops.cast(x, dtypes.int32))
return dataset_ops.Dataset.range(num_outputs).map(defun_fn)
self.run_core_tests(_build_ds, None, num_outputs)
def testSparseCore(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _build_ds(num_outputs):
return dataset_ops.Dataset.range(num_outputs).map(_sparse)
num_outputs = 10
self.run_core_tests(lambda: _build_ds(num_outputs),
lambda: _build_ds(int(num_outputs / 2)), num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/map_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the FilterDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class FilterDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_filter_range_graph(self, div):
return dataset_ops.Dataset.range(100).filter(
lambda x: math_ops.not_equal(math_ops.mod(x, div), 2))
def testFilterCore(self):
div = 3
num_outputs = sum(x % 3 != 2 for x in range(100))
self.run_core_tests(lambda: self._build_filter_range_graph(div),
lambda: self._build_filter_range_graph(div * 2),
num_outputs)
def _build_filter_dict_graph(self):
return dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x ** 2}).filter(
lambda d: math_ops.equal(d["bar"] % 2, 0)).map(
lambda d: d["foo"] + d["bar"])
def testFilterDictCore(self):
num_outputs = sum((x**2) % 2 == 0 for x in range(10))
self.run_core_tests(self._build_filter_dict_graph, None, num_outputs)
def _build_sparse_filter(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
return dataset_ops.Dataset.range(10).map(_map_fn).filter(_filter_fn).map(
lambda x, i: x)
def testSparseCore(self):
num_outputs = 5
self.run_core_tests(self._build_sparse_filter, None, num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/filter_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the UnbatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class UnbatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
components = (
np.arange(tensor_slice_len),
np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(tensor_slice_len))
return dataset_ops.Dataset.from_tensor_slices(components).batch(
batch_size).apply(batching.unbatch())
def testCore(self):
tensor_slice_len = 8
batch_size = 2
num_outputs = tensor_slice_len
self.run_core_tests(
lambda: self.build_dataset(15.0, tensor_slice_len, batch_size),
lambda: self.build_dataset(20.0, tensor_slice_len, batch_size),
num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/unbatch_dataset_serialization_test.py
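As a quick illustration of why `num_outputs` equals `tensor_slice_len` in `testCore` above: `unbatch()` undoes the batching, restoring one element per original slice. A minimal sketch using the public TF 1.x alias of the `batching.unbatch` helper:

import numpy as np
import tensorflow.compat.v1 as tf

# 8 slices -> 4 batches of 2 -> unbatch() flattens back to 8 scalar elements.
ds = tf.data.Dataset.from_tensor_slices(np.arange(8)).batch(2)
ds = ds.apply(tf.data.experimental.unbatch())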
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShuffleDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class ShuffleDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_shuffle_dataset(
self,
range_limit=10,
num_repeats=5,
buffer_size=5,
seed=None,
reshuffle_each_iteration=None,
):
return dataset_ops.Dataset.range(range_limit).shuffle(
buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration).repeat(num_repeats)
def testShuffleCore(self):
seed = 55
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
buffer_sizes = [1, 3, 5, 8, 10]
# pylint: disable=cell-var-from-loop
# pylint: disable=g-long-lambda
for reshuffle_each_iteration in [True, False]:
for buffer_size in buffer_sizes:
self.run_core_tests(
lambda: self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration),
lambda: self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=10,
reshuffle_each_iteration=reshuffle_each_iteration),
num_outputs)
# pylint: enable=cell-var-from-loop
# pylint: enable=g-long-lambda
def testNonDeterministicSeeding(self):
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
buffer_sizes = [1, 3, 5, 8, 10]
for reshuffle_each_iteration in [True, False]:
for buffer_size in buffer_sizes:
def ds_fn():
# pylint: disable=cell-var-from-loop
return self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=None, # Iterator seeds are generated non-deterministically.
reshuffle_each_iteration=reshuffle_each_iteration)
# pylint: enable=cell-var-from-loop
# We checkpoint the initial state of the Dataset so that we can restore
# the seeds in the next run. Since the seeding is non-deterministic
# the dataset gets initialized with different seeds each time.
expected = self.gen_outputs(
ds_fn,
break_points=[0],
num_outputs=num_outputs,
ckpt_saved=False,
verify_exhausted=False,
save_checkpoint_at_end=False)
actual = self.gen_outputs(
ds_fn,
break_points=self.gen_break_points(num_outputs),
num_outputs=num_outputs,
ckpt_saved=True,
verify_exhausted=False)
self.match(expected, actual)
def testMultipleIterators(self):
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
buffer_sizes = [1, 3, 5, 8, 10]
for reshuffle_each_iteration in [True, False]:
for buffer_size in buffer_sizes:
def ds_fn():
# pylint: disable=cell-var-from-loop
return self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=None, # Iterator seeds are generated non-deterministically.
reshuffle_each_iteration=reshuffle_each_iteration)
# pylint: enable=cell-var-from-loop
with ops.Graph().as_default() as g:
ds = ds_fn()
iterators = [ds.make_one_shot_iterator(), ds.make_one_shot_iterator()]
get_next_ops = [it.get_next() for it in iterators]
saveables = [
contrib_iterator_ops.make_saveable_from_iterator(it)
for it in iterators
]
for saveable in saveables:
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = saver_lib.Saver(allow_empty=True)
with self.session(graph=g) as sess:
self._save(sess, saver)
expected = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self._restore(saver, sess)
actual = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self.match(expected, actual)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_dataset_serialization_test.py
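A brief sketch of the two seeding modes the tests above distinguish (public TF 1.x API; illustrative only):

import tensorflow.compat.v1 as tf

# Fixed seed: the permutation is reproducible run to run, so run_core_tests can
# compare a restored iterator against a freshly generated ground truth.
seeded = tf.data.Dataset.range(10).shuffle(
    buffer_size=5, seed=55, reshuffle_each_iteration=False).repeat(2)

# seed=None: seeds are chosen when the iterator is created, which is why
# testNonDeterministicSeeding checkpoints the brand-new iterator first and
# compares a restored run against that, rather than against a fixed sequence.
unseeded = tf.data.Dataset.range(10).shuffle(buffer_size=5).repeat(2)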
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelMapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ParallelMapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._tensor_slice_len = 7
self._num_epochs = 1
self._num_outputs = self._tensor_slice_len * self._num_epochs
def _build_ds(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=3).repeat(self._num_epochs))
def _build_ds_with_prefetch(self, multiplier=37.0):
components = (np.arange(self._tensor_slice_len), np.array([[1, 2, 3]]) *
np.arange(self._tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(self._tensor_slice_len))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_parallel_calls=3).repeat(self._num_epochs).prefetch(5))
def testSaveRestoreCore(self):
for ds_fn in [self._build_ds, self._build_ds_with_prefetch]:
self.run_core_tests(
ds_fn,
lambda: ds_fn(multiplier=15.0), # pylint: disable=cell-var-from-loop
self._num_outputs)
def testSaveStatefulFunction(self):
def _build_ds():
def _map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(
_map_fn, num_parallel_calls=2).prefetch(2)
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureVariableInMapFn(self):
def _build_ds():
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda _: counter_var.assign_add(1),
num_parallel_calls=2).prefetch(2))
self.verify_error_on_save(_build_ds, 15, errors.InvalidArgumentError)
def testCaptureConstantInMapFn(self):
def _build_ds():
constant_var = constant_op.constant(5)
return (dataset_ops.Dataset.from_tensors(0).repeat(10).map(
lambda x: x + constant_var, num_parallel_calls=2).prefetch(2))
self.run_core_tests(_build_ds, None, 10)
def testCaptureDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=2).prefetch(2)
self.run_core_tests(_build_ds, None, num_outputs)
def testBuildDefunInMapFn(self):
num_outputs = 100
def _build_ds():
@function.Defun(dtypes.int64)
def defun_fn(x):
@function.Defun(dtypes.int32)
def defun_fn_deep(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return constant_op.constant(11000) + defun_fn_deep(
math_ops.cast(x, dtypes.int32))
return dataset_ops.Dataset.range(num_outputs).map(
defun_fn, num_parallel_calls=2).prefetch(2)
self.run_core_tests(_build_ds, None, num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/parallel_map_dataset_serialization_test.py
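The contrast that testSaveStatefulFunction and testCaptureVariableInMapFn rely on, sketched with public TF 1.x APIs (illustrative, not part of the test file):

import tensorflow.compat.v1 as tf

# A pure map function: the iterator checkpoint alone describes where to resume.
pure = tf.data.Dataset.range(10).map(lambda x: x * x, num_parallel_calls=2)

# A stateful map function: the random op's state is not captured by the
# checkpoint, so saving such a pipeline is expected to fail with
# InvalidArgumentError (likewise for a captured, mutated variable).
stateful = tf.data.Dataset.range(10).map(
    lambda x: x + tf.random_uniform([], 0, 10, dtype=tf.int64),
    num_parallel_calls=2)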
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SqlDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.kernel_tests import sql_dataset_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class SqlDatasetSerializationTest(
sql_dataset_test_base.SqlDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, num_repeats):
data_source_name = os.path.join(test.get_temp_dir(), "tftest.sqlite")
driver_name = array_ops.placeholder_with_default(
array_ops.constant("sqlite", dtypes.string), shape=[])
query = ("SELECT first_name, last_name, motto FROM students ORDER BY "
"first_name DESC")
output_types = (dtypes.string, dtypes.string, dtypes.string)
return readers.SqlDataset(driver_name, data_source_name, query,
output_types).repeat(num_repeats)
def testSQLSaveable(self):
num_repeats = 4
num_outputs = num_repeats * 2
self.run_core_tests(lambda: self._build_dataset(num_repeats),
lambda: self._build_dataset(num_repeats // 2),
num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/sql_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration test for dataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class SerializationIntegrationTest(test.TestCase):
def _build_input_pipeline(self, name, num_outputs):
with ops.name_scope(name):
ds = dataset_ops.Dataset.range(num_outputs).shuffle(
10, reshuffle_each_iteration=False).prefetch(10)
iterator = ds.make_initializable_iterator()
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
return iterator.initializer, iterator.get_next()
def _build_graph(self, num_pipelines, num_outputs):
init_ops = []
get_next_ops = []
for i in range(num_pipelines):
name = "input_pipeline_%d" % i
init_op, get_next_op = self._build_input_pipeline(name, num_outputs)
init_ops.append(init_op)
get_next_ops.append(get_next_op)
saver = saver_lib.Saver()
return init_ops, get_next_ops, saver
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def testConcurrentSaves(self):
num_pipelines = 100
num_outputs = 100
break_point = 10
all_outputs = [[] for _ in range(num_pipelines)]
with ops.Graph().as_default() as g:
init_ops, get_next_ops, saver = self._build_graph(num_pipelines,
num_outputs)
with self.session(graph=g) as sess:
self.evaluate(init_ops)
for _ in range(break_point):
output = self.evaluate(get_next_ops)
for i in range(num_pipelines):
all_outputs[i].append(output[i])
saver.save(sess, self._ckpt_path())
with ops.Graph().as_default() as g:
init_ops, get_next_ops, saver = self._build_graph(num_pipelines,
num_outputs)
with self.session(graph=g) as sess:
saver.restore(sess, self._ckpt_path())
for _ in range(num_outputs - break_point):
output = self.evaluate(get_next_ops)
for i in range(num_pipelines):
all_outputs[i].append(output[i])
for output in all_outputs:
self.assertSequenceEqual(sorted(output), range(num_outputs))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/serialization_integration_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the StatsDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.experimental.ops import stats_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/116814321): Cannot checkpoint input pipelines that use the
# `stats_ops.set_stats_aggregator` transformation, since saving/restoring
# resources (the StatsAggregator in this case) is not supported yet.
class StatsDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset_bytes_stats(self, num_elements):
return dataset_ops.Dataset.range(num_elements).map(
lambda x: array_ops.tile([x], ops.convert_to_tensor([x]))).apply(
stats_ops.bytes_produced_stats("bytes_produced"))
def test_bytes_produced_stats_invalid_tag_shape(self):
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1"):
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: dataset_ops.Dataset.range(100).apply(
stats_ops.bytes_produced_stats(["bytes_produced"])),
None, 100)
# pylint: enable=g-long-lambda
def testBytesStatsDatasetSaveableCore(self):
num_outputs = 100
self.run_core_tests(
lambda: self._build_dataset_bytes_stats(num_outputs),
lambda: self._build_dataset_bytes_stats(num_outputs // 10), num_outputs)
def _build_dataset_latency_stats(self, num_elements, tag="record_latency"):
return dataset_ops.Dataset.range(num_elements).apply(
stats_ops.latency_stats(tag))
def _build_dataset_multiple_tags(self,
num_elements,
tag1="record_latency",
tag2="record_latency_2"):
return dataset_ops.Dataset.range(num_elements).apply(
stats_ops.latency_stats(tag1)).apply(stats_ops.latency_stats(tag2))
def test_latency_stats_invalid_tag_shape(self):
with self.assertRaisesRegexp(
ValueError, "Shape must be rank 0 but is rank 1"):
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: dataset_ops.Dataset.range(100).apply(
stats_ops.latency_stats(["record_latency", "record_latency_2"])),
None, 100)
# pylint: enable=g-long-lambda
def testLatencyStatsDatasetSaveableCore(self):
num_outputs = 100
self.run_core_tests(
lambda: self._build_dataset_latency_stats(num_outputs),
lambda: self._build_dataset_latency_stats(num_outputs // 10),
num_outputs)
self.run_core_tests(lambda: self._build_dataset_multiple_tags(num_outputs),
None, num_outputs)
tag1 = "record_latency"
tag2 = "record_latency"
self.run_core_tests(
lambda: self._build_dataset_multiple_tags(num_outputs, tag1, tag2),
None, num_outputs)
def _build_dataset_stats_aggregator(self):
aggregator = stats_aggregator.StatsAggregator()
return dataset_ops.Dataset.range(10).apply(
stats_ops.set_stats_aggregator(aggregator))
def test_set_stats_aggregator_not_support_checkpointing(self):
with self.assertRaisesRegexp(errors.UnimplementedError,
"does not support checkpointing"):
self.run_core_tests(self._build_dataset_stats_aggregator, None, 10)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/stats_dataset_serialization_test.py
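For orientation, a sketch of the same pipelines via the public TF 1.x names that mirror the `stats_ops`/`stats_aggregator` modules imported by the test (illustrative only):

import tensorflow.compat.v1 as tf

aggregator = tf.data.experimental.StatsAggregator()
ds = tf.data.Dataset.range(100).apply(
    tf.data.experimental.latency_stats("record_latency"))
# Attaching the aggregator adds a resource that iterator checkpoints cannot
# capture yet, which is exactly what the UnimplementedError test asserts.
ds = ds.apply(tf.data.experimental.set_stats_aggregator(aggregator))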
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ZipDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ChooseFastestDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
num_outputs = 10
batch_size = 2
def build_ds():
dataset = dataset_ops.Dataset.range(num_outputs)
map_fn = lambda x: x * 2
return optimization._ChooseFastestDataset([ # pylint: disable=protected-access
dataset.map(map_fn).batch(batch_size),
dataset.batch(batch_size).map(map_fn)
])
self.run_core_tests(build_ds, None, num_outputs // 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the RangeDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RangeDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _iterator_checkpoint_prefix_local(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(self, iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
self._iterator_checkpoint_prefix_local(),
parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(self, iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(self._iterator_checkpoint_prefix_local()),
dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def testSaveRestore(self):
def _build_graph(start, stop):
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(start, stop))
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(init_op)
for i in range(start, break_point):
self.assertEqual(i, self.evaluate(get_next))
self.evaluate(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
self.evaluate(init_op)
self.evaluate(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
# Saving and restoring in same session.
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
self.evaluate(init_op)
for i in range(start, break_point):
self.assertEqual(i, self.evaluate(get_next))
self.evaluate(save_op)
self.evaluate(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, self.evaluate(get_next))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next)
def _build_range_dataset(self, start, stop):
return dataset_ops.Dataset.range(start, stop)
def testRangeCore(self):
start = 2
stop = 10
stop_1 = 8
self.run_core_tests(lambda: self._build_range_dataset(start, stop),
lambda: self._build_range_dataset(start, stop_1),
stop - start)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/range_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShuffleAndRepeatDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import shuffle_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ShuffleAndRepeatSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_ds(self, seed):
return dataset_ops.Dataset.range(20).apply(
shuffle_ops.shuffle_and_repeat(buffer_size=5, count=5, seed=seed))
def testCore(self):
self.run_core_tests(lambda: self._build_ds(10), lambda: self._build_ds(20),
100)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/shuffle_and_repeat_dataset_serialization_test.py
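A sketch of the fused transformation under test, using the public TF 1.x alias of `shuffle_ops.shuffle_and_repeat` (illustrative only):

import tensorflow.compat.v1 as tf

# Roughly equivalent to .shuffle(5, seed=10).repeat(5), but fused into a single
# stateful op; 20 elements repeated 5 times gives the 100 outputs in testCore.
ds = tf.data.Dataset.range(20).apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=5, count=5, seed=10))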
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GroupByWindow serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import grouping
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class GroupByWindowSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, components):
return dataset_ops.Dataset.from_tensor_slices(components).repeat(-1).apply(
grouping.group_by_window(lambda x: x % 3, lambda _, xs: xs.batch(4), 4))
def testCoreGroupByWindow(self):
components = np.array(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 0, 0], dtype=np.int64)
self.verify_unused_iterator(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_init_before_restore(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
self.verify_restore_in_empty_graph(
lambda: self._build_dataset(components), 12, verify_exhausted=False)
diff_components = np.array([0, 0, 0, 1, 1, 1], dtype=np.int64)
self.verify_restore_in_modified_graph(
lambda: self._build_dataset(components),
lambda: self._build_dataset(diff_components),
12,
verify_exhausted=False)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/group_by_window_serialization_test.py
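A sketch of the grouping configuration above, written against the public TF 1.x name that mirrors the `grouping` module used by the test (illustrative only):

import tensorflow.compat.v1 as tf

ds = tf.data.Dataset.range(12).apply(
    tf.data.experimental.group_by_window(
        key_func=lambda x: x % 3,                        # bucket by x mod 3
        reduce_func=lambda key, window: window.batch(4),
        window_size=4))                                  # flush full buckets of 4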
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ConcatenateDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ConcatenateDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_concatenate_dataset(self, var_array):
input_components = (np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (np.tile(
np.array([[5], [6], [7], [8], [9]]), 20), var_array)
return dataset_ops.Dataset.from_tensor_slices(input_components).concatenate(
dataset_ops.Dataset.from_tensor_slices(to_concatenate_components))
def testConcatenateCore(self):
num_outputs = 9
array = np.tile(np.array([[16], [17], [18], [19], [20]]), 15)
diff_array = np.array([[1], [2], [3], [4], [5]])
self.run_core_tests(lambda: self._build_concatenate_dataset(array),
lambda: self._build_concatenate_dataset(diff_array),
num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/concatenate_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ScanDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ScanDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, num_elements):
return dataset_ops.Dataset.from_tensors(1).repeat(num_elements).apply(
scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1])))
def testScanCore(self):
num_output = 5
self.run_core_tests(lambda: self._build_dataset(num_output),
lambda: self._build_dataset(2), num_output)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/scan_dataset_serialization_test.py
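To make the expected outputs of `_build_dataset` concrete: the scan threads the state pair `[a, b]` through the stream and emits `b` at each step, producing the Fibonacci prefix 1, 1, 2, 3, 5 for `num_output = 5`. A minimal sketch using the public TF 1.x alias of `scan_ops.scan`:

import tensorflow.compat.v1 as tf

ds = tf.data.Dataset.from_tensors(1).repeat(5).apply(
    tf.data.experimental.scan(
        initial_state=[0, 1],
        scan_func=lambda state, _: ([state[1], state[0] + state[1]], state[1])))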
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ZipDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ZipDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, arr):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array(arr)
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
return dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
def testCore(self):
# Equal length components
arr = [37.0, 38.0, 39.0, 40.0]
num_outputs = len(arr)
self.run_core_tests(lambda: self._build_dataset(arr), None, num_outputs)
# Variable length components
diff_size_arr = [1.0, 2.0]
self.run_core_tests(lambda: self._build_dataset(diff_size_arr),
lambda: self._build_dataset(arr), 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/zip_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the PaddedBatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class PaddedBatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testPaddedBatch(self):
def build_dataset(seq_lens):
return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
lambda x: array_ops.fill([x], x)).padded_batch(
4, padded_shapes=[-1])
seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)
seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)
self.run_core_tests(lambda: build_dataset(seq_lens1),
lambda: build_dataset(seq_lens2), 8)
def testPaddedBatchNonDefaultPadding(self):
def build_dataset(seq_lens):
def fill_tuple(x):
filled = array_ops.fill([x], x)
return (filled, string_ops.as_string(filled))
padded_shape = [-1]
return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
fill_tuple).padded_batch(
4,
padded_shapes=(padded_shape, padded_shape),
padding_values=(-1, "<end>"))
seq_lens1 = np.random.randint(1, 20, size=(32,)).astype(np.int32)
seq_lens2 = np.random.randint(21, 40, size=(32,)).astype(np.int32)
self.run_core_tests(lambda: build_dataset(seq_lens1),
lambda: build_dataset(seq_lens2), 8)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/padded_batch_dataset_serialization_test.py
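A compact sketch of the padding behaviour exercised above: each batch is padded to its longest row, and `padding_values` overrides the default pad value (0 for numbers, empty string for strings). Illustrative only, public TF 1.x API:

import tensorflow.compat.v1 as tf

ds = tf.data.Dataset.from_tensor_slices([1, 3, 2, 5]).map(
    lambda x: tf.fill([x], x)).padded_batch(
        4, padded_shapes=[-1], padding_values=-1)
# One batch of shape [4, 5]; rows shorter than 5 are padded with -1.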
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the PrefetchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class PrefetchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def build_dataset(self, seed):
return dataset_ops.Dataset.range(100).prefetch(10).shuffle(
buffer_size=10, seed=seed, reshuffle_each_iteration=False)
def testCore(self):
num_outputs = 100
self.run_core_tests(lambda: self.build_dataset(10),
lambda: self.build_dataset(20), num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/prefetch_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the sequence datasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class SkipDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_skip_dataset(self, count):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).skip(count)
def testSkipFewerThanInputs(self):
count = 4
num_outputs = 10 - count
self.run_core_tests(lambda: self._build_skip_dataset(count),
lambda: self._build_skip_dataset(count + 2),
num_outputs)
def testSkipVarious(self):
# Skip more than inputs
self.run_core_tests(lambda: self._build_skip_dataset(20), None, 0)
# Skip exactly the input size
self.run_core_tests(lambda: self._build_skip_dataset(10), None, 0)
self.run_core_tests(lambda: self._build_skip_dataset(-1), None, 0)
# Skip nothing
self.run_core_tests(lambda: self._build_skip_dataset(0), None, 10)
def testInvalidSkip(self):
with self.assertRaisesRegexp(ValueError,
'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), None, 0)
class TakeDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_take_dataset(self, count):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).take(count)
def testTakeFewerThanInputs(self):
count = 4
self.run_core_tests(
lambda: self._build_take_dataset(count),
lambda: self._build_take_dataset(count + 2),
count,
)
def testTakeVarious(self):
# Take more than inputs
self.run_core_tests(lambda: self._build_take_dataset(20), None, 10)
# Take exactly the input size
self.run_core_tests(lambda: self._build_take_dataset(10), None, 10)
# Take all
self.run_core_tests(lambda: self._build_take_dataset(-1), None, 10)
# Take nothing
self.run_core_tests(lambda: self._build_take_dataset(0), None, 0)
def testInvalidTake(self):
with self.assertRaisesRegexp(ValueError,
'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_take_dataset([1, 2]), None, 0)
class RepeatDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_repeat_dataset(self, count, take_count=3):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).take(
take_count).repeat(count)
def testFiniteRepeat(self):
count = 10
self.run_core_tests(lambda: self._build_repeat_dataset(count),
lambda: self._build_repeat_dataset(count + 2),
3 * count)
def testEmptyRepeat(self):
self.run_core_tests(lambda: self._build_repeat_dataset(0), None, 0)
def testInfiniteRepeat(self):
self.verify_unused_iterator(
lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
self.verify_init_before_restore(
lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
self.verify_restore_in_modified_graph(
lambda: self._build_repeat_dataset(-1),
lambda: self._build_repeat_dataset(2),
20,
verify_exhausted=False)
# Test repeat empty dataset
self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), None, 0)
def testInvalidRepeat(self):
with self.assertRaisesRegexp(
ValueError, 'Shape must be rank 0 but is rank 1'):
self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0),
None, 0)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/sequence_dataset_serialization_test.py
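For the counts used above, a small reference sketch (illustrative): a negative `skip` count skips the entire dataset, while a negative `take` count keeps it all.

import tensorflow.compat.v1 as tf

ten = tf.data.Dataset.range(10)
four_left = ten.skip(6)          # 4 elements remain
none_left = ten.skip(-1)         # negative count skips everything
all_kept = ten.take(-1)          # negative count keeps everything
thirty = ten.take(3).repeat(10)  # 3 * 10 = 30 elements, cf. testFiniteRepeat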
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the BatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class BatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
components = (
np.arange(tensor_slice_len),
np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
np.array(multiplier) * np.arange(tensor_slice_len))
return dataset_ops.Dataset.from_tensor_slices(components).batch(batch_size)
def testCore(self):
tensor_slice_len = 8
batch_size = 2
num_outputs = tensor_slice_len // batch_size
self.run_core_tests(
lambda: self.build_dataset(15.0, tensor_slice_len, batch_size),
lambda: self.build_dataset(20.0, tensor_slice_len, batch_size),
num_outputs)
def _build_dataset_dense_to_sparse(self, components):
return dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.fill([x], x)).apply(
batching.dense_to_sparse_batch(4, [12]))
def testDenseToSparseBatchDatasetCore(self):
components = np.random.randint(5, size=(40,)).astype(np.int32)
diff_comp = np.random.randint(2, size=(100,)).astype(np.int32)
num_outputs = len(components) // 4
self.run_core_tests(lambda: self._build_dataset_dense_to_sparse(components),
lambda: self._build_dataset_dense_to_sparse(diff_comp),
num_outputs)
def _sparse(self, i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
def _build_dataset_sparse(self, batch_size=5):
return dataset_ops.Dataset.range(10).map(self._sparse).batch(batch_size)
def testSparseCore(self):
self.run_core_tests(self._build_dataset_sparse,
lambda: self._build_dataset_sparse(2), 2)
def _build_dataset_nested_sparse(self):
return dataset_ops.Dataset.range(10).map(self._sparse).batch(5).batch(2)
def testNestedSparseCore(self):
self.run_core_tests(self._build_dataset_nested_sparse, None, 1)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/batch_dataset_serialization_test.py
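A sketch of `dense_to_sparse_batch` as configured in `_build_dataset_dense_to_sparse`, via the public TF 1.x alias of the `batching` helper (illustrative only):

import numpy as np
import tensorflow.compat.v1 as tf

# Ragged rows of lengths 1..4 are combined into one SparseTensor per batch with
# dense_shape [4, 12] (the row_shape), instead of being padded up front.
ds = tf.data.Dataset.from_tensor_slices(
    np.arange(1, 5, dtype=np.int32)).map(lambda x: tf.fill([x], x)).apply(
        tf.data.experimental.dense_to_sparse_batch(batch_size=4, row_shape=[12]))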
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the _AutoShard dataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class AutoShardDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(10):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(10):
writer.write(self._record(i, j))
writer.close()
return filenames
def setUp(self):
self._filenames = self._createFiles()
def testCore(self):
def build_dataset():
dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
dataset = dataset.apply(
interleave_ops.parallel_interleave(readers.TFRecordDataset, 10))
dataset = distribute._AutoShardDataset(dataset, 5, 3)
return dataset
self.run_core_tests(build_dataset, None, 20)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/auto_shard_dataset_serialization_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ShardDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class ShardDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_dataset(self, num_elements, num_shards, index):
return dataset_ops.Dataset.range(num_elements).shard(num_shards, index)
@parameterized.parameters((10, 5, 2, 3), (10, 10, 0, 9), (100, 2, 0, 1))
def testCore(self, elems, num_shards, index1, index2):
self.run_core_tests(lambda: self._build_dataset(elems, num_shards, index1),
lambda: self._build_dataset(elems, num_shards, index2),
elems // num_shards)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/shard_dataset_serialization_test.py
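Concretely, the shard arithmetic the parameterized cases check (a minimal sketch):

import tensorflow.compat.v1 as tf

# shard(num_shards, index) keeps every num_shards-th element starting at
# `index`, so range(10).shard(5, 2) yields just 2 and 7 -- i.e.
# elems // num_shards outputs, as asserted in testCore.
ds = tf.data.Dataset.range(10).shard(num_shards=5, index=2)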
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
# TODO(b/72408568): Remove this once session.run can get
# variant tensors.
"""Remove variants from a nest structure, so sess.run will execute."""
def _remove_variant(x):
if isinstance(x, ops.Tensor) and x.dtype == dtypes.variant:
return ()
else:
return x
return nest.map_structure(_remove_variant, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
def tearDown(self):
self._delete_ckpt()
# TODO(b/72657739): Remove sparse_tensor argument, which is to test the
# (deprecated) saveable `SparseTensorSliceDataset`, once the API
  # `from_sparse_tensor_slices()` and related tests are deleted.
def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
"""Runs the core tests.
Args:
ds_fn1: 0-argument function that returns a Dataset.
ds_fn2: 0-argument function that returns a Dataset different from
ds_fn1. If None, verify_restore_in_modified_graph test is not run.
num_outputs: Total number of outputs expected from this Dataset.
sparse_tensors: Whether dataset is built from SparseTensor(s).
Raises:
AssertionError if any test fails.
"""
# NOTE: We disable all default optimizations in serialization tests in order
# to test the actual dataset in question.
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
def ds_fn1_no_opt():
return ds_fn1().with_options(options)
self.verify_unused_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_fully_used_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_exhausted_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_init_before_restore(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_multiple_breaks(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_reset_restored_iterator(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
self.verify_restore_in_empty_graph(
ds_fn1_no_opt, num_outputs, sparse_tensors=sparse_tensors)
if ds_fn2:
def ds_fn2_no_opt():
return ds_fn2().with_options(options)
self.verify_restore_in_modified_graph(
ds_fn1_no_opt,
ds_fn2_no_opt,
num_outputs,
sparse_tensors=sparse_tensors)
def verify_unused_iterator(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that saving and restoring an unused iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn, [0],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_fully_used_iterator(self, ds_fn, num_outputs,
sparse_tensors=False):
"""Verifies that saving and restoring a fully used iterator works.
Note that this only checks saving and restoring an iterator from which
`num_outputs` items have been produced but does not check for an
exhausted iterator, i.e., one from which an OutOfRange error has been
returned.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if test fails.
"""
self.verify_run_with_breaks(
ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
"""Verifies that saving and restoring an exhausted iterator works.
An exhausted iterator is one which has returned an OutOfRange error.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
self.gen_outputs(
ds_fn, [],
num_outputs,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
actual = self.gen_outputs(
ds_fn, [],
0,
ckpt_saved=True,
verify_exhausted=True,
sparse_tensors=sparse_tensors)
self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
      num_breaks: The number of break points. These are spread uniformly over
        [0, num_outputs], with both endpoints inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to re-initialize a restored iterator.
This is useful when restoring a training checkpoint during validation.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Collect ground truth containing all outputs.
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Skip some items and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Restore from checkpoint and then run init_op.
with ops.Graph().as_default() as g:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
self._initialize(init_op, sess)
for _ in range(num_outputs):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_modified_graph(self,
ds_fn1,
ds_fn2,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in a modified graph.
Builds an input pipeline using ds_fn1, runs it for `break_point` steps
and saves a checkpoint. Then builds a new graph using ds_fn2, restores
the checkpoint from ds_fn1 and verifies that the restore is successful.
Args:
ds_fn1: See `run_core_tests`.
ds_fn2: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
    # Skip `break_point` items and store the remaining items produced from
    # ds_fn1 in `expected`.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn1, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn1 and save checkpoint.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build graph for ds_fn2 but load checkpoint for ds_fn1.
with ops.Graph().as_default() as g:
_, get_next_op, saver = self._build_graph(
ds_fn2, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_empty_graph(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in an empty graph.
Builds an input pipeline using ds_fn, runs it for `break_point` steps
and saves a checkpoint. Then builds a new empty graph, restores
the checkpoint from ds_fn and verifies that the restore is successful.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
    # Skip `break_point` items and store the remaining items produced from
    # ds_fn in `expected`.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build an empty graph but load checkpoint for ds_fn.
with ops.Graph().as_default() as g:
get_next_op, saver = self._build_empty_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_error_on_save(self,
ds_fn,
num_outputs,
error,
break_point=None,
sparse_tensors=False):
"""Attempts to save a non-saveable iterator.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
error: Declared error when trying to save iterator.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
for _ in range(break_point):
sess.run(get_next_op)
with self.assertRaises(error):
self._save(sess, saver)
def verify_run_with_breaks(self,
ds_fn,
break_points,
num_outputs,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that ds_fn() produces the same outputs with and without breaks.
1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
*without* stopping at break points.
2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
with stopping at break points.
    The outputs from 1 and 2 are then deep-matched against each other.
Args:
ds_fn: See `gen_outputs`.
break_points: See `gen_outputs`.
num_outputs: See `gen_outputs`.
init_before_restore: See `gen_outputs`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
actual = self.gen_outputs(
ds_fn,
break_points,
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
self.match(expected, actual)
def gen_outputs(self,
ds_fn,
break_points,
num_outputs,
ckpt_saved=False,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True,
save_checkpoint_at_end=True):
"""Generates elements from input dataset while stopping at break points.
Produces `num_outputs` outputs and saves the state of the iterator in the
Saver checkpoint.
Args:
ds_fn: 0-argument function that returns the dataset.
break_points: A list of integers. For each `break_point` in
`break_points`, we produce outputs till `break_point` number of items
have been produced and then checkpoint the state. The current graph
and session are destroyed and a new graph and session are used to
        produce outputs till the next break point or till `num_outputs` elements
have been produced. `break_point` must be <= `num_outputs`.
num_outputs: The total number of outputs to produce from the iterator.
ckpt_saved: Whether a checkpoint already exists. If False, we build the
graph from ds_fn.
init_before_restore: Whether init should be called before saver.restore.
This is just so that we can verify that restoring an already initialized
iterator works.
sparse_tensors: Whether dataset is built from SparseTensor(s).
verify_exhausted: Whether to verify that the iterator has been exhausted
after producing `num_outputs` elements.
save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved at each break point but not at
        the end. Note that checkpoints overwrite each other, so there is always
        only a single checkpoint available. Defaults to True.
Returns:
A list of `num_outputs` items.
"""
outputs = []
def get_ops():
if ckpt_saved:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
else:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
return init_op, get_next_op, saver
for i in range(len(break_points) + 1):
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = get_ops()
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
if ckpt_saved:
if init_before_restore:
self._initialize(init_op, sess)
self._restore(saver, sess)
else:
self._initialize(init_op, sess)
start = break_points[i - 1] if i > 0 else 0
end = break_points[i] if i < len(break_points) else num_outputs
num_iters = end - start
for _ in range(num_iters):
outputs.append(sess.run(get_next_op))
if i == len(break_points) and verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
if save_checkpoint_at_end or i < len(break_points):
self._save(sess, saver)
ckpt_saved = True
return outputs
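  # Example with hypothetical numbers: break_points=[3, 7] and num_outputs=10
  # run the pipeline in three legs: elements [0, 3), then [3, 7) after
  # restoring the first checkpoint in a fresh graph and session, then [7, 10)
  # after restoring the second. The returned list is the concatenation of all
  # ten outputs.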
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
    Handles scalars, numpy arrays, and other Python sequence containers
    (e.g. list, dict), as well as SparseTensorValue and RaggedTensorValue.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
elif isinstance(expected, sparse_tensor.SparseTensorValue):
return self.match(
(expected.indices, expected.values, expected.dense_shape),
(actual.indices, actual.values, actual.dense_shape))
elif isinstance(expected, ragged_tensor_value.RaggedTensorValue):
return self.match(
(expected.values, expected.row_splits),
(actual.values, actual.row_splits))
else:
self.assertEqual(expected, actual)
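  # For instance, match([1, 2], [1, 2]) and match(np.array([1, 2]), [1, 2])
  # both succeed (ndarrays are converted with tolist() before comparison),
  # whereas match([1, 2], (1, 2)) raises because the container types differ.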
def does_not_match(self, expected, actual):
with self.assertRaises(AssertionError):
self.match(expected, actual)
def gen_break_points(self, num_outputs, num_samples=10):
"""Generates `num_samples` breaks points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
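  # For example, gen_break_points(20, num_samples=5) yields
  # [0, 5, 10, 15, 20]: evenly spaced, with both endpoints included.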
def _build_graph(self, ds_fn, sparse_tensors=False):
iterator = dataset_ops.make_initializable_iterator(ds_fn())
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
init_op = iterator.initializer
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
sparse_tensors)
saver = saver_lib.Saver(allow_empty=True)
return init_op, get_next, saver
def _build_empty_graph(self, ds_fn, sparse_tensors=False):
iterator = iterator_ops.Iterator.from_structure(
self._get_output_types(ds_fn),
output_shapes=self._get_output_shapes(ds_fn),
output_classes=self._get_output_classes(ds_fn))
saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
if sparse_tensors:
get_next = sparse_tensor.SparseTensor(*iterator.get_next())
else:
get_next = iterator.get_next()
saver = saver_lib.Saver(allow_empty=True)
return get_next, saver
def _add_iterator_ops_to_collection(self,
init_op,
get_next,
ds_fn,
sparse_tensors=False):
ops.add_to_collection("iterator_ops", init_op)
    # `get_next` may be a tuple, e.g. in TensorSliceDataset. Since collections
    # do not support tuples, we flatten the tensors and restore the shape in
# `_get_iterator_ops_from_collection`.
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
ops.add_to_collection("iterator_ops", get_next.indices)
ops.add_to_collection("iterator_ops", get_next.values)
ops.add_to_collection("iterator_ops", get_next.dense_shape)
return
get_next_list = nest.flatten(get_next)
for i, output_class in enumerate(
nest.flatten(self._get_output_classes(ds_fn))):
if output_class is sparse_tensor.SparseTensor:
ops.add_to_collection("iterator_ops", get_next_list[i].indices)
ops.add_to_collection("iterator_ops", get_next_list[i].values)
ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
else:
ops.add_to_collection("iterator_ops", get_next_list[i])
def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
all_ops = ops.get_collection("iterator_ops")
if sparse_tensors: # specific for deprecated `from_sparse_tensor_slices`.
init_op, indices, values, dense_shape = all_ops
return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
get_next_list = []
i = 1
for output_class in nest.flatten(self._get_output_classes(ds_fn)):
if output_class is sparse_tensor.SparseTensor:
indices, values, dense_shape = all_ops[i:i + 3]
i += 3
get_next_list.append(
sparse_tensor.SparseTensor(indices, values, dense_shape))
else:
get_next_list.append(all_ops[i])
i += 1
return all_ops[0], nest.pack_sequence_as(
self._get_output_types(ds_fn), get_next_list)
def _get_output_types(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_types(ds_fn())
def _get_output_shapes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_shapes(ds_fn())
def _get_output_classes(self, ds_fn):
with ops.Graph().as_default():
return dataset_ops.get_legacy_output_classes(ds_fn())
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _latest_ckpt(self):
return checkpoint_management.latest_checkpoint(self.get_temp_dir())
def _save(self, sess, saver):
saver.save(sess, self._ckpt_path())
def _restore(self, saver, sess):
sess.run(lookup_ops.tables_initializer())
saver.restore(sess, self._latest_ckpt())
def _initialize(self, init_op, sess):
sess.run(variables.global_variables_initializer())
sess.run(lookup_ops.tables_initializer())
sess.run(init_op)
def _import_meta_graph(self):
meta_file_path = self._ckpt_path() + ".meta"
return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
    for f in files:
      gfile.Remove(f)
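# A minimal usage sketch, not part of the original test base: a hypothetical
# subclass illustrating how concrete serialization tests drive
# `run_core_tests`. The class name and the `_build_range_ds` helper are
# illustrative assumptions rather than an existing TensorFlow test.
class _ExampleRangeSerializationTest(DatasetSerializationTestBase):
  def _build_range_ds(self, stop):
    return dataset_ops.Dataset.range(stop)
  def testRangeCore(self):
    num_outputs = 10
    self.run_core_tests(lambda: self._build_range_ds(num_outputs),
                        lambda: self._build_range_ds(num_outputs * 2),
                        num_outputs)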
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the IgnoreErrors input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import error_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class IgnoreErrorsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_ds(self, components):
return dataset_ops.Dataset.from_tensor_slices(components).map(
lambda x: array_ops.check_numerics(x, "message")).apply(
error_ops.ignore_errors())
def testIgnoreErrorsCore(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
diff_components = np.array([1., 2., 3., np.nan]).astype(np.float32)
num_outputs = 4
self.run_core_tests(lambda: self._build_ds(components),
lambda: self._build_ds(diff_components), num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/ignore_errors_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CsvDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.platform import test
class CsvDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self._num_cols = 7
self._num_rows = 10
self._num_epochs = 14
self._num_outputs = self._num_rows * self._num_epochs
inputs = [
",".join(str(self._num_cols * j + i)
for i in range(self._num_cols))
for j in range(self._num_rows)
]
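    # Each row j holds the seven consecutive integers starting at 7 * j, so
    # row 0 is "0,1,2,3,4,5,6" and row 1 is "7,8,9,10,11,12,13".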
contents = "\n".join(inputs).encode("utf-8")
self._filename = os.path.join(self.get_temp_dir(), "file.csv")
self._compressed = os.path.join(self.get_temp_dir(),
"comp.csv") # GZip compressed
with open(self._filename, "wb") as f:
f.write(contents)
with gzip.GzipFile(self._compressed, "wb") as f:
f.write(contents)
def ds_func(self, **kwargs):
compression_type = kwargs.get("compression_type", None)
if compression_type == "GZIP":
filename = self._compressed
elif compression_type is None:
filename = self._filename
else:
raise ValueError("Invalid compression type:", compression_type)
return readers.CsvDataset(filename, **kwargs).repeat(self._num_epochs)
def testSerializationCore(self):
defs = [[0]] * self._num_cols
self.run_core_tests(
lambda: self.ds_func(record_defaults=defs, buffer_size=2),
lambda: self.ds_func(record_defaults=defs, buffer_size=12),
self._num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/csv_dataset_serialization_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_ds(size):
dataset = dataset_ops.Dataset.range(size)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
return optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
dataset, [branch_0, branch_1],
ratio_numerator=10)
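    # Both branches batch the range into batches of 10, so a dataset of
    # `size` elements yields size // 10 batched outputs.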
for size in [100, 1000]:
self.run_core_tests(lambda: build_ds(size), None, size // 10) # pylint: disable=cell-var-from-loop
def testWithCapture(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithPrefetch(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithMoreOutputThanInput(self):
def build_ds():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=100)
self.run_core_tests(build_ds, None, 1000)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/choose_fastest_branch_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TextLineDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TextLineDatasetSerializationTest(
reader_dataset_ops_test_base.TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return core_readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
# pylint: enable=cell-var-from-loop
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/textline_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MatchingFilesDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.platform import test
class MatchingFilesDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_patterns):
return matching_files.MatchingFilesDataset(test_patterns)
def testMatchingFilesCore(self):
tmp_dir = tempfile.mkdtemp()
width = 16
depth = 8
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
if not os.path.exists(new_base):
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
num_outputs = width * len(patterns)
self.run_core_tests(lambda: self._build_iterator_graph(patterns),
lambda: self._build_iterator_graph(patterns[0:1]),
num_outputs)
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/matching_files_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndBatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapAndBatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testNumParallelBatches(self):
range_size = 11
num_repeats = 2
batch_size = 5
total_outputs = range_size * num_repeats
num_outputs_drop_remainder = total_outputs // batch_size
num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))
num_parallel_batches = 2
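    # With 11 elements repeated twice there are 22 outputs in total: 22 // 5 =
    # 4 full batches when dropping the remainder and ceil(22 / 5) = 5 batches
    # when keeping it.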
def build_ds(range_start, drop_remainder=False):
def _map_fn(x):
return math_ops.square(x)
return dataset_ops.Dataset.range(
range_start, range_start + range_size).repeat(num_repeats).apply(
batching.map_and_batch(
map_func=_map_fn,
batch_size=batch_size,
num_parallel_batches=num_parallel_batches,
drop_remainder=drop_remainder))
self.run_core_tests(lambda: build_ds(10), lambda: build_ds(15),
num_outputs_keep_remainder)
self.run_core_tests(lambda: build_ds(10, True), lambda: build_ds(15, True),
num_outputs_drop_remainder)
def testNumParallelCalls(self):
range_size = 11
num_repeats = 2
batch_size = 5
total_outputs = range_size * num_repeats
num_outputs_drop_remainder = total_outputs // batch_size
num_outputs_keep_remainder = int(math.ceil(total_outputs / batch_size))
num_parallel_calls = 7
def build_ds(range_start, drop_remainder=False):
def _map_fn(x):
return math_ops.square(x)
return dataset_ops.Dataset.range(
range_start, range_start + range_size).repeat(num_repeats).apply(
batching.map_and_batch(
map_func=_map_fn,
batch_size=batch_size,
num_parallel_calls=num_parallel_calls,
drop_remainder=drop_remainder))
self.run_core_tests(lambda: build_ds(10), lambda: build_ds(15),
num_outputs_keep_remainder)
self.run_core_tests(lambda: build_ds(10, True), lambda: build_ds(15, True),
num_outputs_drop_remainder)
def testSparse(self):
def build_dataset():
def map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0]], values=(i * [1]), dense_shape=[1])
return dataset_ops.Dataset.range(10).apply(
batching.map_and_batch(map_fn, 5))
self.run_core_tests(build_dataset, None, 2)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/map_and_batch_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SampleFromDatasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class SampleFromDatasetsSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_dataset(self, probs, num_samples):
dataset = interleave_ops.sample_from_datasets(
[
dataset_ops.Dataset.from_tensors(i).repeat(None)
for i in range(len(probs))
],
probs,
seed=1813)
return dataset.take(num_samples)
def testSerializationCore(self):
self.run_core_tests(
lambda: self._build_dataset([0.5, 0.5], 100),
lambda: self._build_dataset([0.25, 0.25, 0.25, 0.25], 1000), 100)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/sample_from_datasets_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the TFRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class TFRecordDatasetSerializationTest(
reader_dataset_ops_test_base.TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return core_readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/tf_record_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CacheDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class CacheDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def setUp(self):
self.range_size = 10
self.num_repeats = 3
self.num_outputs = self.range_size * self.num_repeats
self.cache_file_prefix = 'test'
def make_dataset_fn(self, is_memory):
if is_memory:
filename = ''
else:
filename = os.path.join(self.get_temp_dir(), self.cache_file_prefix)
def ds_fn():
return dataset_ops.Dataset.range(self.range_size).cache(filename).repeat(
self.num_repeats)
return ds_fn
def expected_outputs(self):
return list(range(self.range_size)) * self.num_repeats
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 5 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 8 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 8, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(8))
if is_memory:
outputs = outputs[:5]
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
else:
      # Restoring from checkpoint and running GetNext should raise
      # `AlreadyExistsError` now because the lockfile already exists.
with self.assertRaises(errors.AlreadyExistsError):
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointAfterOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 15 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointAfterOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 18 entries from iterator but save checkpoint after producing 15.
outputs = self.gen_outputs(
ds_fn, [15], 18, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(8)))
outputs = list(range(10)) + list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointBeforeOneEpochButRunCompleteEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 13 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 13, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(3)))
# Since we ran for more than one epoch, the cache was completely written.
# The ckpt was saved when the iterator was in cache-write mode. Test that
# the iterator falls back to read mode after restoring if the cache has
# been completely written.
outputs = list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointUnusedWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Checkpoint before get_next is called even once.
outputs = self.gen_outputs(ds_fn, [], 0, verify_exhausted=False)
self.assertSequenceEqual(outputs, [])
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, ckpt_saved=True, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testCheckpointUnusedMidwayWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint, then produce no elements and checkpoint.
outputs.extend(
self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce rest of the elements.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testUnusedCheckpointError(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and save ckpt.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
if is_memory:
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, self.expected_outputs())
else:
      # Since the complete cache has not been written, a new iterator that does
      # not restore the checkpoint will raise an error because there is a
      # partial cache shard.
with self.assertRaises(errors.AlreadyExistsError):
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
@parameterized.named_parameters(
('Memory', True),
('File', False),
)
def testIgnoreCheckpointIfCacheWritten(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 15 elements and save ckpt. This will write the complete cache.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Build the iterator again but do not restore from ckpt. Since the cache
# has already been written we should be able to use it.
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/cache_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the FlatMapDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class FlatMapDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
# Complicated way of saying range(start, start+25).
def build_ds(start):
def map_fn(x):
return dataset_ops.Dataset.range(x, x + 5)
return dataset_ops.Dataset.range(start, start + 5 * 5, 5).flat_map(map_fn)
self.run_core_tests(lambda: build_ds(0), lambda: build_ds(10), 25)
def testMapThenFlatMap(self):
def build_ds():
def flat_map_fn(_):
def map_fn(y):
return 10 * math_ops.cast(y, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)
return dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.run_core_tests(build_ds, None, 500)
def testCaptureDefunInMapFn(self):
def build_ds():
def map_fn(x):
@function.Defun(dtypes.int64)
def defun_fn(x):
return constant_op.constant(1000) + math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.from_tensor_slices([defun_fn(x)])
return dataset_ops.Dataset.range(100).flat_map(map_fn)
self.run_core_tests(build_ds, None, 100)
def testDisallowVariableCapture(self):
def build_ds():
test_var = variable_scope.get_variable(
name="test_var", shape=(), use_resource=True)
return dataset_ops.Dataset.range(5).flat_map(
lambda _: dataset_ops.Dataset.from_tensor_slices([test_var]))
self.verify_error_on_save(build_ds, 5, errors.InvalidArgumentError)
def testDisallowCapturingStatefulOps(self):
def build_ds():
def flat_map_fn(_):
def map_fn(x):
return random_ops.random_uniform(
(), 0, 10, dtype=dtypes.int32) * math_ops.cast(x, dtypes.int32)
return dataset_ops.Dataset.range(100).map(map_fn)
return dataset_ops.Dataset.range(5).flat_map(flat_map_fn)
self.verify_error_on_save(build_ds, 500, errors.InvalidArgumentError)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _flat_map_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_ds():
return dataset_ops.Dataset.range(10).map(_map_fn).flat_map(_flat_map_fn)
self.run_core_tests(_build_ds, None, 20)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/flat_map_dataset_serialization_test.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the _RebatchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class RebatchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_dataset(num_elements, batch_size):
return distribute._RebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
4 * batch_size, drop_remainder=True),
num_workers=4)
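    # range(200) batched into drop-remainder batches of 4 * 10 = 40 yields 5
    # batches; rebatching across 4 workers splits each into 4 batches of 10,
    # hence the 20 expected outputs below.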
self.run_core_tests(lambda: build_dataset(200, 10), None, 20)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelInterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def setUp(self):
self.input_values = np.array([4, 5, 6], dtype=np.int64)
self.num_repeats = 2
self.num_outputs = np.sum(self.input_values) * 2
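    # Each input value x expands to x elements (range(10 * x, 11 * x)) and the
    # inputs are repeated twice, so num_outputs = 2 * (4 + 5 + 6) = 30.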
def _build_ds(self, cycle_length, block_length, sloppy=False):
return (dataset_ops.Dataset.from_tensor_slices(
self.input_values).repeat(self.num_repeats).apply(
interleave_ops.parallel_interleave(
lambda x: dataset_ops.Dataset.range(10 * x, 11 * x),
cycle_length, block_length, sloppy)))
def testSerializationCore(self):
# cycle_length > 1, block_length > 1
cycle_length = 2
block_length = 3
self.run_core_tests(
lambda: self._build_ds(cycle_length, block_length),
lambda: self._build_ds(cycle_length * 2, block_length * 1),
self.num_outputs)
# cycle_length = 1
cycle_length = 1
block_length = 3
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
None, self.num_outputs)
# block_length = 1
cycle_length = 2
block_length = 1
self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
None, self.num_outputs)
def testSerializationWithSloppy(self):
break_points = self.gen_break_points(self.num_outputs, 10)
expected_outputs = np.repeat(
np.concatenate([np.arange(10 * x, 11 * x) for x in self.input_values]),
self.num_repeats).tolist()
def run_test(cycle_length, block_length):
actual = self.gen_outputs(
lambda: self._build_ds(cycle_length, block_length, True),
break_points, self.num_outputs)
self.assertSequenceEqual(sorted(actual), expected_outputs)
# cycle_length > 1, block_length > 1
run_test(2, 3)
# cycle_length = 1
run_test(1, 3)
# block_length = 1
run_test(2, 1)
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, 1))
self.run_core_tests(_build_dataset, None, 20)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/parallel_interleave_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental iterator_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import model_fn
@test_util.run_v1_only('b/123904664')
class CheckpointInputPipelineHookTest(test.TestCase):
@staticmethod
def _model_fn(features, labels, mode, config):
del labels
del mode
del config
global_step = training_util.get_or_create_global_step()
update_global_step_op = global_step.assign_add(1)
latest_feature = variables.VariableV1(
0, name='latest_feature', dtype=dtypes.int64)
store_latest_feature_op = latest_feature.assign(features)
ops.add_to_collection('my_vars', global_step)
ops.add_to_collection('my_vars', latest_feature)
return model_fn.EstimatorSpec(
mode='train',
train_op=control_flow_ops.group(
[update_global_step_op, store_latest_feature_op]),
loss=constant_op.constant(2.0))
def _read_vars(self, model_dir):
"""Returns (global_step, latest_feature)."""
with ops.Graph().as_default() as g:
ckpt_path = checkpoint_management.latest_checkpoint(model_dir)
meta_filename = ckpt_path + '.meta'
saver_lib.import_meta_graph(meta_filename)
saver = saver_lib.Saver()
with self.session(graph=g) as sess:
saver.restore(sess, ckpt_path)
return sess.run(ops.get_collection('my_vars'))
def _build_iterator_saver_hook(self, est):
return iterator_ops.CheckpointInputPipelineHook(est)
def testReturnDatasetFromInputFn(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
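    # The model_fn increments the global step and records the last feature it
    # saw, so after two steps over Dataset.range(10) we expect (2, 1).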
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
def testBuildIteratorInInputFn(self):
def _input_fn():
ds = dataset_ops.Dataset.range(10)
iterator = ds.make_one_shot_iterator()
return iterator.get_next()
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
def testDoNotRestore(self):
def _input_fn():
return dataset_ops.Dataset.range(10)
est = estimator.Estimator(model_fn=self._model_fn)
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
# Hook not provided, input pipeline was not restored.
est.train(_input_fn, steps=2)
self.assertSequenceEqual(self._read_vars(est.model_dir), (6, 1))
def testRaiseErrorIfNoIterator(self):
def _input_fn():
return constant_op.constant(1, dtype=dtypes.int64)
est = estimator.Estimator(model_fn=self._model_fn)
with self.assertRaises(ValueError):
est.train(
_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/checkpoint_input_pipeline_hook_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the FixedLengthRecordDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import reader_dataset_ops_test_base
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.platform import test
class FixedLengthRecordDatasetSerializationTest(
reader_dataset_ops_test_base.FixedLengthRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return core_readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/fixed_length_record_dataset_serialization_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the InterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class InterleaveDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase,
parameterized.TestCase):
def _build_iterator_graph(self, input_values, cycle_length, block_length,
num_parallel_calls):
repeat_count = 2
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
repeat_count).interleave(
lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
cycle_length, block_length, num_parallel_calls)
@parameterized.named_parameters(
("1", 2, 3, None),
("2", 2, 3, 1),
("3", 2, 3, 2),
("4", 1, 3, None),
("5", 1, 3, 1),
("6", 2, 1, None),
("7", 2, 1, 1),
("8", 2, 1, 2),
)
def testSerializationCore(self, cycle_length, block_length,
num_parallel_calls):
input_values = np.array([4, 5, 6], dtype=np.int64)
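    # Each value x expands to x copies of itself and the input is repeated
    # twice, so the pipeline produces sum(input_values) * 2 elements in total.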
num_outputs = np.sum(input_values) * 2
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(
input_values, cycle_length, block_length, num_parallel_calls),
lambda: self._build_iterator_graph(
input_values, cycle_length * 2, block_length, num_parallel_calls),
num_outputs)
# pylint: enable=g-long-lambda
def testSparseCore(self):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
def _build_dataset():
return dataset_ops.Dataset.range(10).map(_map_fn).interleave(
_interleave_fn, cycle_length=1)
self.run_core_tests(_build_dataset, None, 20)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/kernel_tests/serialization/interleave_dataset_serialization_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.CsvDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import string
import tempfile
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
class CsvDatasetBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.CsvDataset`."""
FLOAT_VAL = '1.23456E12'
STR_VAL = string.ascii_letters * 10
def _set_up(self, str_val):
    # Since this isn't a test.TestCase, we have to create a test directory
    # manually.
gfile.MakeDirs(googletest.GetTempDir())
self._temp_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())
self._num_cols = [4, 64, 256]
self._num_per_iter = 5000
self._filenames = []
for n in self._num_cols:
fn = os.path.join(self._temp_dir, 'file%d.csv' % n)
      with open(fn, 'w') as f:
        # Just write 100 rows and use `repeat`... This assumes the cost of
        # creating an iterator is not significant.
row = ','.join([str_val for _ in range(n)])
f.write('\n'.join([row for _ in range(100)]))
self._filenames.append(fn)
def _tear_down(self):
gfile.DeleteRecursively(self._temp_dir)
def _run_benchmark(self, dataset, num_cols, prefix):
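    # Skip all but the last record so that the single session.run() in the
    # timing loop below pulls self._num_per_iter records through the pipeline.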
dataset = dataset.skip(self._num_per_iter - 1)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
deltas = []
for _ in range(10):
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
start = time.time()
# NOTE: This depends on the underlying implementation of skip, to have
# the net effect of calling `GetNext` num_per_iter times on the
# input dataset. We do it this way (instead of a python for loop, or
# batching N inputs in one iter) so that the overhead from session.run
# or batch doesn't dominate. If we eventually optimize skip, this has
# to change.
sess.run(next_element)
end = time.time()
deltas.append(end - start)
# Median wall time per CSV record read and decoded
median_wall_time = np.median(deltas) / self._num_per_iter
self.report_benchmark(
iters=self._num_per_iter,
wall_time=median_wall_time,
name='%s_with_cols_%d' % (prefix, num_cols))
def benchmark_map_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_float_map_decode_csv')
self._tear_down()
def benchmark_map_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
dataset = core_readers.TextLineDataset(self._filenames[i]).repeat()
dataset = dataset.map(lambda l: parsing_ops.decode_csv(l, **kwargs)) # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_strings_map_decode_csv')
self._tear_down()
def benchmark_csv_dataset_with_floats(self):
self._set_up(self.FLOAT_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [[0.0]] * num_cols}
      dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat()  # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_float_fused_dataset')
self._tear_down()
def benchmark_csv_dataset_with_strings(self):
self._set_up(self.STR_VAL)
for i in range(len(self._filenames)):
num_cols = self._num_cols[i]
kwargs = {'record_defaults': [['']] * num_cols}
      dataset = readers.CsvDataset(self._filenames[i], **kwargs).repeat()  # pylint: disable=cell-var-from-loop
self._run_benchmark(dataset, num_cols, 'csv_strings_fused_dataset')
self._tear_down()
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/csv_dataset_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import resampling
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
def _time_resampling(data_np, target_dist, init_dist, num_to_sample): # pylint: disable=missing-docstring
dataset = dataset_ops.Dataset.from_tensor_slices(data_np).repeat()
# Reshape distribution via rejection sampling.
dataset = dataset.apply(
resampling.rejection_resample(
class_func=lambda x: x,
target_dist=target_dist,
initial_dist=init_dist,
seed=142))
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
get_next = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
start_time = time.time()
for _ in xrange(num_to_sample):
sess.run(get_next)
end_time = time.time()
return end_time - start_time
class RejectionResampleBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.rejection_resample()`."""
def benchmark_resample_performance(self):
init_dist = [0.25, 0.25, 0.25, 0.25]
target_dist = [0.0, 0.0, 0.0, 1.0]
num_classes = len(init_dist)
    # We don't need many samples to test a Dirac-delta target distribution.
num_samples = 1000
data_np = np.random.choice(num_classes, num_samples, p=init_dist)
resample_time = _time_resampling(
data_np, target_dist, init_dist, num_to_sample=1000)
self.report_benchmark(iters=1000, wall_time=resample_time, name="resample")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/rejection_resample_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for autotuning performance knobs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class AutotuneBenchmark(test.Benchmark):
"""Benchmarks for autotuning performance knobs."""
def benchmark_map(self):
a = self._benchmark_map(autotune=False)
b = self._benchmark_map(autotune=True)
print("speedup: %f" % (a / b))
def _benchmark_map(self, autotune):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
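    # Disable the default static optimizations so that the autotune flag is
    # the only difference between the two measured pipelines.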
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(10000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="map" + ("_autotune" if autotune else ""))
return np.median(deltas)
def benchmark_map_and_batch(self):
a = self._benchmark_map_and_batch(autotune=False)
b = self._benchmark_map_and_batch(autotune=True)
print("speedup: %f" % (a / b))
def _benchmark_map_and_batch(self, autotune):
batch_size = 16
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(
math_ops.matmul, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_batch_fusion = True
options.experimental_optimization.autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(1000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_and_batch" + ("_autotune" if autotune else ""))
return np.median(deltas)
def benchmark_interleave(self):
a = self._benchmark_interleave(autotune=False)
b = self._benchmark_interleave(autotune=True)
print("speedup: %f" % (a / b))
def _benchmark_interleave(self, autotune):
k = 1024 * 1024
dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
np.random.rand(4 * k,
1))).repeat()
dataset = dataset.map(math_ops.matmul)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
cycle_length=10,
num_parallel_calls=dataset_ops.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next.op)
for _ in range(10000):
start = time.time()
sess.run(get_next.op)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=10000,
wall_time=np.median(deltas),
name="interleave" + ("_autotune" if autotune else ""))
return np.median(deltas)
def benchmark_map_and_interleave(self):
a = self._benchmark_map_and_interleave(autotune=False)
b = self._benchmark_map_and_interleave(autotune=True)
print("speedup: %f" % (a / b))
def _benchmark_map_and_interleave(self, autotune):
k = 1024 * 1024
a = (np.random.rand(1, 8 * k), np.random.rand(8 * k, 1))
b = (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))
c = (np.random.rand(1, 2 * k), np.random.rand(2 * k, 1))
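    # Three pairs of (1, n) and (n, 1) matmul operands of decreasing size;
    # the map stages below reduce each pair to a 1x1 product.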
dataset_a = dataset_ops.Dataset.from_tensors(a).repeat()
dataset_b = dataset_ops.Dataset.from_tensors(b).repeat()
dataset_c = dataset_ops.Dataset.from_tensors(c).repeat()
def f1(x, y):
return math_ops.matmul(x, y)
def f2(a, b):
x, y = b
return a, math_ops.matmul(x, y)
dataset = dataset_a
dataset = dataset.map(f1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_b))
dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset_ops.Dataset.range(1).repeat().interleave(
lambda _: dataset,
num_parallel_calls=dataset_ops.AUTOTUNE,
cycle_length=2)
dataset = dataset_ops.Dataset.zip((dataset, dataset_c))
dataset = dataset.map(f2, num_parallel_calls=dataset_ops.AUTOTUNE)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.autotune = autotune
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
get_next = iterator.get_next()
deltas = []
with session.Session() as sess:
for _ in range(5):
sess.run(get_next)
for _ in range(1000):
start = time.time()
sess.run(get_next)
end = time.time()
deltas.append(end - start)
self.report_benchmark(
iters=1000,
wall_time=np.median(deltas),
name="map_and_interleave" + ("_autotune" if autotune else ""))
return np.median(deltas)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/autotune_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.snapshot()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.client import session
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import snapshot
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors_impl as errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class SnapshotDatasetBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.experimental.snapshot()`."""
def _makeSnapshotDirectory(self):
tmp_dir = test.get_temp_dir()
tmp_dir = os.path.join(tmp_dir, "snapshot")
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
return tmp_dir
def _createSimpleDataset(self, num_elems, tmp_dir=None,
compression=snapshot.COMPRESSION_NONE):
if not tmp_dir:
tmp_dir = self._makeSnapshotDirectory()
dataset = dataset_ops.Dataset.from_tensor_slices([1.0])
dataset = dataset.map(
lambda x: gen_array_ops.broadcast_to(x, [50, 50, 3]))
dataset = dataset.repeat(num_elems)
dataset = dataset.apply(snapshot.snapshot(tmp_dir, compression=compression))
return dataset
def _consumeDataset(self, dataset, num_elems):
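    # Skipping num_elems elements and fetching once drives the entire pipeline
    # in a single run() call.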
dataset = dataset.skip(num_elems)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
try:
sess.run(next_element)
except errors.OutOfRangeError:
pass
def benchmarkWriteSnapshotGzipCompression(self):
num_elems = 500000
dataset = self._createSimpleDataset(
num_elems, compression=snapshot.COMPRESSION_GZIP)
self.run_and_report_benchmark(dataset, num_elems, "write_gzip",
warmup=False, iters=1)
def benchmarkWriteSnapshotSimple(self):
num_elems = 500000
dataset = self._createSimpleDataset(num_elems)
# We only run one iteration here because running multiple iterations will
# cause the later iterations to simply read from the already written
# snapshot rather than write a new one.
self.run_and_report_benchmark(dataset, num_elems, "write_simple",
warmup=False, iters=1)
def benchmarkPassthroughSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
# Consume only 1 element, thus making sure we don't finalize.
self._consumeDataset(dataset, 1)
self.run_and_report_benchmark(dataset, num_elems, "passthrough_simple")
def benchmarkReadSnapshotSimple(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(num_elems, tmp_dir)
    # Consume all the elements to let the snapshot write everything to disk.
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_simple")
def benchmarkReadSnapshotGzipCompression(self):
num_elems = 100000
tmp_dir = self._makeSnapshotDirectory()
dataset = self._createSimpleDataset(
num_elems, tmp_dir, compression=snapshot.COMPRESSION_GZIP)
self._consumeDataset(dataset, num_elems)
self.run_and_report_benchmark(dataset, num_elems, "read_gzip")
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/snapshot_dataset_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for static optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class OptimizationBenchmark(test.Benchmark):
"""Benchmarks for static optimizations."""
def benchmark_map_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_fusion(chain_length, False)
self._benchmark_map_fusion(chain_length, True)
def _benchmark_map_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
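      # With map_fusion enabled, this chain of identity maps is collapsed into
      # a single map, so longer chains should benefit more from the rewrite.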
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
def benchmark_map_and_filter_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_and_filter_fusion(chain_length, False)
self._benchmark_map_and_filter_fusion(chain_length, True)
def _benchmark_map_and_filter_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x + 5).filter(
lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "noopt"
self.report_benchmark(
iters=100,
wall_time=median_wall_time,
name="map_and_filter_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
  # This benchmark compares the performance of a pipeline with multiple
  # chained filters, with and without filter fusion.
def benchmark_filter_fusion(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_filter_fusion(chain_length, False)
self._benchmark_filter_fusion(chain_length, True)
def _benchmark_filter_fusion(self, chain_length, optimize_dataset):
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(5).repeat(None)
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
opt_mark = "opt" if optimize_dataset else "no-opt"
self.report_benchmark(
iters=1000,
wall_time=median_wall_time,
name="chain_length_{}_{}".format(opt_mark, chain_length))
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/optimize_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for the `MapVectorization` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
def _generate_csv_test_case():
"""Generates a `decode_csv()` test case."""
def csv_factory():
return dataset_ops.Dataset.from_tensor_slices(["1.0:2:a",
"2.4:5:c"]).repeat(5)
def decode_csv_fn(x):
return parsing_ops.decode_csv(
x,
record_defaults=[
constant_op.constant([], dtypes.float32),
constant_op.constant([], dtypes.int32),
constant_op.constant([], dtypes.string)
],
field_delim=":")
return decode_csv_fn, csv_factory
def _generate_parse_single_example_test_case():
"""Generates a `parse_single_example()` test case."""
def parse_example_factory():
"""Parse example factory."""
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
return dataset_ops.Dataset.from_tensor_slices(
constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
]))
def parse_single_example_fn(x):
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
return parsing_ops.parse_single_example(x, features)
return parse_single_example_fn, parse_example_factory
# TODO(rachelim): Add a benchmark for more expensive transformations, such as
# vgg_preprocessing.
class MapVectorizationBenchmark(test.Benchmark):
"""Benchmarks for the `MapVectorization` optimization."""
def _run(self, x, num_iters=100, name=None):
deltas = []
with session.Session() as sess:
for _ in range(5):
# Warm up session...
sess.run(x)
for _ in range(num_iters):
start = time.time()
sess.run(x)
end = time.time()
deltas.append(end - start)
median_time = np.median(deltas)
self.report_benchmark(iters=num_iters, wall_time=median_time, name=name)
return median_time
def _compare(self, input_dataset, map_fn, batch_size, input_size, str_id):
num_elems = int(np.sum([np.prod(x) for x in input_size]))
name_template = "{}_batch_size_{}_input_element_size_{}_{}"
unoptimized_dataset = input_dataset.map(map_fn).batch(batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
unoptimized_dataset = unoptimized_dataset.with_options(options)
unoptimized_next = dataset_ops.make_one_shot_iterator(
unoptimized_dataset).get_next()
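    # The map_vectorization rewrite replaces map(fn).batch() with a batched
    # pipeline that applies a vectorized version of fn, which is what the
    # optimized branch below measures.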
options = dataset_ops.Options()
options.experimental_optimization.map_vectorization.enabled = True
optimized_dataset = unoptimized_dataset.with_options(options)
optimized_next = dataset_ops.make_one_shot_iterator(
optimized_dataset).get_next()
unoptimized_time = self._run(
unoptimized_next,
name=name_template.format(str_id, batch_size, num_elems, "unoptimized"))
optimized_time = self._run(
optimized_next,
name=name_template.format(str_id, batch_size, num_elems, "optimized"))
print("Batch size: {}\n"
"Input element size: {}\n"
"Transformation: {}\n"
"Speedup: {}\n".format(batch_size, input_size, str_id,
(unoptimized_time / optimized_time)))
# Known cheap functions
def benchmark_identity(self):
self._benchmark_helper(lambda *args: [array_ops.identity(x) for x in args],
"identity")
def benchmark_add_const(self):
self._benchmark_helper(lambda *args: [x + 1 for x in args], "add_const")
def benchmark_return_const(self):
self._benchmark_helper(lambda *args: [constant_op.constant(2)], "ret_const")
def benchmark_select(self):
self._benchmark_helper(lambda *args: args[0], "select")
def benchmark_cast(self):
self._benchmark_helper(
lambda *args: [math_ops.cast(x, dtypes.float32) for x in args], "cast")
def benchmark_reshape(self):
self._benchmark_helper(
lambda *args: [array_ops.reshape(x, (-1, 30)) for x in args], "reshape")
def benchmark_decode_csv(self):
csv_fn, csv_factory = _generate_csv_test_case()
self._benchmark_helper(csv_fn, "decode_csv", lambda: [csv_factory()])
def benchmark_parse_single_example(self):
# NOTE: Since we haven't implemented a vectorizer for "SerializeSparse",
# this function is only naively vectorized.
parse_fn, parse_factory = _generate_parse_single_example_test_case()
self._benchmark_helper(parse_fn, "parse_single_example",
lambda: [parse_factory()])
def _default_dataset_factory(self):
input_sizes = [(10, 10, 3), (10, 100, 300)]
for sz in input_sizes:
yield dataset_ops.Dataset.from_tensor_slices(np.random.rand(*sz))
def _benchmark_helper(self, map_fn, str_id, base_dataset_factory=None):
if base_dataset_factory is None:
base_dataset_factory = self._default_dataset_factory
batch_size = 1000
for base_dataset in base_dataset_factory():
base_dataset = base_dataset.repeat()
input_size = [
tuple(shape.as_list())
for shape in nest.flatten(
dataset_ops.get_legacy_output_shapes(base_dataset))
]
self._compare(base_dataset, map_fn, batch_size, input_size, str_id)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/map_vectorization_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for static optimizations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class ChooseFastestBenchmark(test.Benchmark):
"""Benchmarks for static optimizations."""
def benchmark_choose_fastest(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
map_batch_dataset = dataset.map(lambda x: x + 1).batch(100)
batch_map_dataset = dataset.batch(100).map(lambda x: x + 1)
merge_dataset = optimization._ChooseFastestDataset( # pylint: disable=protected-access
[batch_map_dataset, map_batch_dataset])
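    # _ChooseFastestDataset runs its candidate pipelines for a number of
    # experiments and then keeps producing elements from the faster one.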
self._benchmark(map_batch_dataset, "map_batch_dataset")
self._benchmark(batch_map_dataset, "batch_map_dataset")
self._benchmark(merge_dataset, "merge_dataset")
def benchmark_choose_fastest_first_n_iterations(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
map_batch_dataset = dataset.map(lambda x: x + 1).batch(100)
batch_map_dataset = dataset.batch(100).map(lambda x: x + 1)
merge_dataset = optimization._ChooseFastestDataset( # pylint: disable=protected-access
[batch_map_dataset, map_batch_dataset])
self._benchmark_first_n(map_batch_dataset, "map_batch_dataset")
self._benchmark_first_n(batch_map_dataset, "batch_map_dataset")
self._benchmark_first_n(merge_dataset, "merge_dataset")
def _benchmark_first_n(self, dataset, name):
n = 10 # The default num_experiments for ChooseFastestDataset
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
deltas = []
for _ in range(100):
with session.Session() as sess:
start = time.time()
for _ in range(n):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / n
self.report_benchmark(
iters=n, wall_time=median_wall_time, name=name + "_first_%d" % n)
def _benchmark(self, dataset, name):
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
# Run 10 steps to warm up the session caches before taking the first
# measurement. Additionally, 10 is the default num_experiments for
# ChooseFastestDataset.
for _ in range(10):
sess.run(next_element.op)
deltas = []
for _ in range(50):
start = time.time()
for _ in range(50):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
    median_wall_time = np.median(deltas) / 50
    self.report_benchmark(iters=50, wall_time=median_wall_time, name=name)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/choose_fastest_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
_NUMPY_RANDOM_SEED = 42
class MapAndBatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.map_and_batch()`."""
def benchmark_map_and_batch(self):
"""Measures the performance of parallelized batching."""
shapes = [(), (10,), (10, 10), (10, 10, 10), (224, 224, 3)]
batch_size_values = [1, 32, 64, 128, 1024]
shape_placeholder = array_ops.placeholder(dtypes.int64, shape=[None])
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset_ops.Dataset.range(1000000000)
dense_value = random_ops.random_normal(shape=shape_placeholder)
dataset = dataset.apply(batching.map_and_batch(
lambda _: dense_value, batch_size_placeholder))
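    # map_and_batch fuses the map and batch steps so mapped elements are
    # written directly into the output batch.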
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
for shape in shapes:
for batch_size in batch_size_values:
with session.Session() as sess:
sess.run(iterator.initializer, feed_dict={
shape_placeholder: shape, batch_size_placeholder: batch_size})
# Use a C++ callable to minimize the Python overhead in the benchmark.
callable_opts = config_pb2.CallableOptions()
callable_opts.target.append(next_element.op.name)
op_callable = sess._make_callable_from_options(callable_opts) # pylint: disable=protected-access
# Run five steps to warm up the session caches before taking the
# first measurement.
for _ in range(5):
op_callable()
deltas = []
overall_start = time.time()
# Run at least five repetitions and for at least five seconds.
while len(deltas) < 5 or time.time() - overall_start < 5.0:
start = time.time()
for _ in range(100):
op_callable()
end = time.time()
deltas.append(end - start)
del op_callable
median_wall_time = np.median(deltas) / 100.0
iters = len(deltas) * 100
self.report_benchmark(
iters=iters, wall_time=median_wall_time,
name="num_elements_%d_batch_size_%d" % (np.prod(shape), batch_size))
def benchmark_map_and_batch_chaining_versus_fusing(self):
"""Compares the performance of chaining and fusing map and batch.
NOTE: It is recommended to build the benchmark with
`-c opt --copt=-mavx --copt=-mavx2 --copt=-mfma --copt=-gmlt`
and execute it on a machine with at least 32 CPU cores.
"""
# Sequential pipeline configurations.
seq_elem_size_series = itertools.product([1], [1], [1, 2, 4, 8], [16])
seq_batch_size_series = itertools.product([1], [1], [1], [8, 16, 32, 64])
# Parallel pipeline configuration.
par_elem_size_series = itertools.product([32], [32], [1, 2, 4, 8], [256])
par_batch_size_series = itertools.product([32], [32], [1],
[128, 256, 512, 1024])
par_num_calls_series = itertools.product([8, 16, 32, 64], [32], [1], [512])
par_inter_op_series = itertools.product([32], [8, 16, 32, 64], [1], [512])
def name(method, label, num_calls, inter_op, element_size, batch_size):
return ("%s_id_%s_num_calls_%d_inter_op_%d_elem_size_%d_batch_size_%d" % (
method,
          hashlib.sha1(label.encode("utf-8")).hexdigest()[:8],
num_calls,
inter_op,
element_size,
batch_size,
))
def benchmark(label, series):
"""Runs benchmark the given series."""
def make_dataset(element_size, num_calls, batch_size): # pylint: disable=missing-docstring
k = 1024 * 1024
x = constant_op.constant(np.random.rand(element_size, 4 * k))
y = constant_op.constant(np.random.rand(4 * k, 1))
dataset = dataset_ops.Dataset.range(1000000000000).map(lambda _: (x, y))
dataset = dataset.map(
math_ops.matmul,
num_parallel_calls=num_calls).batch(batch_size=batch_size)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
return dataset.with_options(options)
for num_calls, inter_op, element_size, batch_size in series:
num_iters = 1024 // (
(element_size * batch_size) // min(num_calls, inter_op))
# By default the chained map().batch() calls will not be fused.
chained_dataset = make_dataset(element_size, num_calls, batch_size)
chained_iterator = dataset_ops.make_one_shot_iterator(chained_dataset)
chained_get_next = chained_iterator.get_next()
chained_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(chained_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(chained_get_next.op)
end = time.time()
chained_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(chained_deltas),
name=name("chained", label, num_calls, inter_op, element_size,
batch_size))
# Apply an option to the default dataset that will fuse map().batch().
options = dataset_ops.Options()
options.experimental_optimization.map_and_batch_fusion = True
fused_dataset = chained_dataset.with_options(options)
fused_iterator = dataset_ops.make_one_shot_iterator(fused_dataset)
fused_get_next = fused_iterator.get_next()
fused_deltas = []
with session.Session(
config=config_pb2.ConfigProto(
inter_op_parallelism_threads=inter_op,
use_per_session_threads=True)) as sess:
for _ in range(5):
sess.run(fused_get_next.op)
for _ in range(num_iters):
start = time.time()
sess.run(fused_get_next.op)
end = time.time()
fused_deltas.append(end - start)
self.report_benchmark(
iters=num_iters,
wall_time=np.median(fused_deltas),
name=name("fused", label, num_calls, inter_op, element_size,
batch_size))
print()
np.random.seed(_NUMPY_RANDOM_SEED)
benchmark("Sequential element size evaluation", seq_elem_size_series)
benchmark("Sequential batch size evaluation", seq_batch_size_series)
benchmark("Parallel element size evaluation", par_elem_size_series)
benchmark("Parallel batch size evaluation", par_batch_size_series)
benchmark("Transformation parallelism evaluation", par_num_calls_series)
benchmark("Threadpool size evaluation", par_inter_op_series)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/map_and_batch_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for ChooseFastestBranchDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
class ChooseFastestBranchBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for ChooseFastestBranchDatast."""
def make_benchmark_datasets(self,
input_dataset,
branch_0,
branch_1,
ratio_numerator,
num_elements_per_branch=None):
ds_0 = branch_0(input_dataset)
ds_1 = branch_1(input_dataset)
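    # Keep both branches as stand-alone datasets so their throughput can be
    # reported alongside the choose-fastest wrapper built below.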
choose_fastest_dataset = optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
input_dataset, [branch_0, branch_1],
ratio_numerator=ratio_numerator,
num_elements_per_branch=num_elements_per_branch)
return ds_0, ds_1, choose_fastest_dataset
def make_simple_benchmark_datasets(self):
dataset = dataset_ops.Dataset.range(1000**2).repeat()
def branch_0(dataset):
return dataset.map(lambda x: x + 1).batch(100)
def branch_1(dataset):
return dataset.batch(100).map(lambda x: x + 1)
return self.make_benchmark_datasets(dataset, branch_0, branch_1, 100)
def benchmark_choose_fastest(self):
map_batch, batch_map, choose_fastest = self.make_simple_benchmark_datasets()
def benchmark(dataset, name):
self.run_and_report_benchmark(dataset, 5000, name, iters=1)
benchmark(map_batch, "map_batch_dataset")
benchmark(batch_map, "batch_map_dataset")
benchmark(choose_fastest, "choose_fastest_dataset")
def benchmark_choose_fastest_first_n_iterations(self):
map_batch, batch_map, choose_fastest = self.make_simple_benchmark_datasets()
def benchmark(dataset, name):
self.run_and_report_benchmark(
dataset, num_elements=10, name="%s_first_10" % name, iters=5)
benchmark(map_batch, "map_batch_dataset")
benchmark(batch_map, "batch_map_dataset")
benchmark(choose_fastest, "choose_fastest_dataset")
def benchmark_with_input_skew(self):
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
# Dataset with 100 elements that emulates performance characteristics of a
# file-based dataset stored in remote storage, where the first element
# takes significantly longer to produce than the remaining elements.
input_dataset = make_dataset(1000 * 1000,
0).concatenate(make_dataset(1, 100)).take(100)
def slow_branch(dataset):
return dataset.apply(sleep.sleep(10000))
def fast_branch(dataset):
return dataset.apply(sleep.sleep(10))
def benchmark(dataset, name):
self.run_and_report_benchmark(
dataset, num_elements=100, name="%s_with_skew" % name, iters=1)
# ChooseFastestBranch dataset should choose the same branch regardless
# of the order of the branches, so we expect the iteration speed to be
# comparable for both versions.
slow_ds, fast_ds, choose_fastest_0 = self.make_benchmark_datasets(
input_dataset, slow_branch, fast_branch, 1, num_elements_per_branch=2)
_, _, choose_fastest_1 = self.make_benchmark_datasets(
input_dataset, fast_branch, slow_branch, 1, num_elements_per_branch=2)
benchmark(slow_ds, "slow_dataset")
benchmark(fast_ds, "fast_dataset")
benchmark(choose_fastest_0, "choose_fastest_dataset_0")
benchmark(choose_fastest_1, "choose_fastest_dataset_1")
if __name__ == "__main__":
benchmark_base.test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/choose_fastest_branch_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _make_fake_dataset_fn():
"""Returns a dataset that emulates a remote storage data source.
Returns a dataset factory which creates a dataset with 100 elements that
emulates the performance characteristic of a file-based dataset stored in a
remote storage. In particular, the first element will take an order of
magnitude longer to produce than the remaining elements (1s vs. 1ms).
"""
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset(1000 * 1000, 0).concatenate(make_dataset(1000,
100)).take(100)
return fake_dataset_fn
class ParallelInterleaveBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
def _benchmark(self, dataset_fn, iters, num_elements):
with ops.Graph().as_default():
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset_fn().with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
deltas = []
for _ in range(iters):
start = time.time()
for _ in range(num_elements):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
mean_wall_time = np.mean(deltas) / num_elements
self.report_benchmark(iters=iters, wall_time=mean_wall_time)
def benchmark_sequential_interleave(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(), cycle_length=10)
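    # Without parallelism the 1s start-up cost of every fake file is paid
    # serially, which dominates this baseline.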
self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
def benchmark_parallel_interleave_v1(self):
"""Benchmark for parallel interleave that does not support autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(
interleave_ops.parallel_interleave(
_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
def benchmark_parallel_interleave_v2(self):
"""Benchmark for parallel interleave that supports autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(),
cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.unbatch()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class UnbatchBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.unbatch()`."""
def benchmark_native_unbatch(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.apply(batching.unbatch())
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="native_batch_size_%d" %
batch_size)
# Include a benchmark of the previous `unbatch()` implementation that uses
# a composition of more primitive ops. Eventually we'd hope to generate code
# that is as good in both cases.
def benchmark_old_unbatch_implementation(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
elems_per_trial = 10000
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
dataset = dataset.batch(batch_size_placeholder)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
dataset = dataset.skip(elems_per_trial)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
with session.Session() as sess:
for batch_size in batch_sizes:
deltas = []
for _ in range(5):
sess.run(
iterator.initializer,
feed_dict={batch_size_placeholder: batch_size})
start = time.time()
sess.run(next_element.op)
end = time.time()
deltas.append((end - start) / elems_per_trial)
median_wall_time = np.median(deltas)
self.report_benchmark(
iters=10000,
wall_time=median_wall_time,
name="unfused_batch_size_%d" %
batch_size)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/unbatch_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for the experimental `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class MatchingFilesBenchmark(test.Benchmark):
"""Benchmark for the experimental `MatchingFilesDataset`."""
def benchmark_nested_directories(self):
tmp_dir = tempfile.mkdtemp()
width = 500
depth = 10
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
deltas = []
iters = 3
for _ in range(iters):
with ops.Graph().as_default():
dataset = matching_files.MatchingFilesDataset(patterns)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
sub_deltas = []
while True:
try:
start = time.time()
sess.run(next_element)
end = time.time()
sub_deltas.append(end - start)
except errors.OutOfRangeError:
break
deltas.append(sub_deltas)
median_deltas = np.median(deltas, axis=0)
self.report_benchmark(
iters=iters,
wall_time=np.sum(median_deltas),
extras={
'read first file:':
median_deltas[0],
'read second file:':
median_deltas[1],
'avg time for reading %d more filenames:' %
(len(median_deltas) - 2):
np.average(median_deltas[2:])
},
name='nested_directory(%d*%d)' %
(width, depth))
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/matching_files_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager benchmarks too.
class MapDefunBenchmark(test.Benchmark):
"""Benchmarks for MapDefunOp."""
def _run(self, op, name=None, num_iters=3000):
with session.Session() as sess:
for _ in range(5):
sess.run(op)
start = time.time()
for _ in range(num_iters):
sess.run(op)
end = time.time()
mean_us = (end - start) * 1e6 / num_iters
self.report_benchmark(
name=name,
iters=num_iters,
wall_time=mean_us,
extras={"examples_per_sec": num_iters / (end - start)})
def benchmark_defun_vs_map_fn(self):
"""Benchmarks to compare the performance of MapDefun vs tf.map_fn."""
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def defun(x):
return array_ops.identity(x)
def fn(x):
return array_ops.identity(x)
base = math_ops.range(100)
for input_size in [10, 100, 1000, 10000]:
num_iters = 100000 // input_size
map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
map_fn_op = map_fn.map_fn(fn, base)
self._run(
map_defun_op, "with_defun_size_%d" % input_size, num_iters=num_iters)
self._run(
map_fn_op, "without_defun_size_%d" % input_size, num_iters=num_iters)
if __name__ == "__main__":
test.main()
|
tensorflow-master
|
tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for controlling distribution in `tf.data` pipelines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.util import options
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.DistributeOptions")
class DistributeOptions(options.OptionsBase):
"""Represents options for distributed data processing.
You can set the distribution options of a dataset through the
`experimental_distribute` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.DistributeOptions`.
```python
options = tf.data.Options()
options.experimental_distribute.auto_shard = False
dataset = dataset.with_options(options)
```
"""
auto_shard = options.create_option(
name="auto_shard",
ty=bool,
docstring=
"Whether the dataset should be automatically sharded when processed"
"in a distributed fashion. This is applicable when using Keras with "
"multi-worker/TPU distribution strategy, and by "
"using strategy.experimental_distribute_dataset(). In other cases, this "
"option does nothing. If None, defaults to True.",
default_factory=lambda: True)
num_devices = options.create_option(
name="num_devices",
ty=int,
docstring=
"The number of devices attached to this input pipeline. This will be "
"automatically set by MultiDeviceIterator.")
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/distribute_options.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Batching dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import convert
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.dense_to_sparse_batch")
def dense_to_sparse_batch(batch_size, row_shape):
"""A transformation that batches ragged elements into `tf.SparseTensor`s.
Like `Dataset.padded_batch()`, this transformation combines multiple
consecutive elements of the dataset, which might have different
shapes, into a single element. The resulting element has three
components (`indices`, `values`, and `dense_shape`), which
comprise a `tf.SparseTensor` that represents the same data. The
`row_shape` represents the dense shape of each row in the
resulting `tf.SparseTensor`, to which the effective batch size is
prepended. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.dense_to_sparse_batch(
batch_size=2, row_shape=[6])) ==
{
([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices
['a', 'b', 'c', 'a', 'b'], # values
[2, 6]), # dense_shape
([[0, 0], [0, 1], [0, 2], [0, 3]],
['a', 'b', 'c', 'd'],
[1, 6])
}
```
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like object
representing the equivalent dense shape of a row in the resulting
`tf.SparseTensor`. Each element of this dataset must have the same rank as
`row_shape`, and must have size less than or equal to `row_shape` in each
dimension.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _DenseToSparseBatchDataset(dataset, batch_size, row_shape)
return _apply_fn
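# Illustrative sketch (not part of the original module): a minimal use of
# `dense_to_sparse_batch` on variable-length int64 rows. The helper name and
# the local `math_ops` import are added for this sketch only.
def _dense_to_sparse_batch_example():
  """Returns a dataset of `tf.SparseTensor`s built from ragged rows."""
  from tensorflow.python.ops import math_ops  # sketch-only import
  # Rows of lengths 1..4: [0], [0, 1], [0, 1, 2], [0, 1, 2, 3].
  ragged = dataset_ops.Dataset.range(1, 5).map(math_ops.range)
  # Each output element is a sparse tensor with dense_shape [<=2, 4].
  return ragged.apply(dense_to_sparse_batch(batch_size=2, row_shape=[4]))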
@deprecation.deprecated(None, "Use `tf.data.experimental.map_and_batch()")
@tf_export(v1=["data.experimental.map_and_batch_with_legacy_function"])
def map_and_batch_with_legacy_function(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
  NOTE: This is an escape hatch for existing uses of `map_and_batch` that do
  not work with V2 functions. New uses are strongly discouraged, and existing
  uses should migrate to `map_and_batch`; this legacy variant is exported only
  in the TF 1.x API.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder,
use_legacy_function=True)
return _apply_fn
@deprecation.deprecated(
None,
"Use `tf.data.Dataset.map(map_func, num_parallel_calls)` followed by "
"`tf.data.Dataset.batch(batch_size, drop_remainder)`. Static tf.data "
"optimizations will take care of using the fused implementation.")
@tf_export("data.experimental.map_and_batch")
def map_and_batch(map_func,
batch_size,
num_parallel_batches=None,
drop_remainder=False,
num_parallel_calls=None):
"""Fused implementation of `map` and `batch`.
Maps `map_func` across `batch_size` consecutive elements of this dataset
and then combines them into a batch. Functionally, it is equivalent to `map`
followed by `batch`. However, by fusing the two transformations together, the
implementation can be more efficient. Surfacing this transformation in the API
is temporary. Once automatic input pipeline optimization is implemented,
the fusing of `map` and `batch` will happen automatically and this API will be
deprecated.
Args:
map_func: A function mapping a nested structure of tensors to another
nested structure of tensors.
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
num_parallel_batches: (Optional.) A `tf.int64` scalar `tf.Tensor`,
representing the number of batches to create in parallel. On one hand,
higher values can help mitigate the effect of stragglers. On the other
hand, higher values can increase contention if CPU is scarce.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in case its size is smaller than
desired; the default behavior is not to drop the smaller batch.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process in parallel. If not
specified, `batch_size * num_parallel_batches` elements will be processed
in parallel. If the value `tf.data.experimental.AUTOTUNE` is used, then
the number of parallel calls is set dynamically based on available CPU.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
specified.
"""
if num_parallel_batches is None and num_parallel_calls is None:
num_parallel_calls = batch_size
elif num_parallel_batches is not None and num_parallel_calls is None:
num_parallel_calls = batch_size * num_parallel_batches
elif num_parallel_batches is not None and num_parallel_calls is not None:
raise ValueError("The `num_parallel_batches` and `num_parallel_calls` "
"arguments are mutually exclusive.")
def _apply_fn(dataset):
return _MapAndBatchDataset(dataset, map_func, batch_size,
num_parallel_calls, drop_remainder)
return _apply_fn
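# Illustrative sketch (not part of the original module): the fused
# transformation next to the unfused `map` + `batch` pipeline recommended by
# the deprecation notice above. The helper name is hypothetical.
def _map_and_batch_example():
  """Returns (fused, unfused) datasets that yield identical batches."""
  base = dataset_ops.Dataset.range(100)
  fused = base.apply(
      map_and_batch(lambda x: x * 2, batch_size=10, num_parallel_calls=4))
  unfused = base.map(lambda x: x * 2, num_parallel_calls=4).batch(10)
  return fused, unfused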
@deprecation.deprecated(None, "Use `tf.data.Dataset.unbatch()`.")
@tf_export("data.experimental.unbatch")
def unbatch():
"""Splits elements of a dataset into multiple elements on the batch dimension.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
a.apply(tf.data.experimental.unbatch()) == {
'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return dataset.unbatch()
return _apply_fn
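# Illustrative sketch (not part of the original module): round-tripping a
# dataset through `batch` and the deprecated `unbatch` wrapper recovers the
# original scalar elements. The helper name is hypothetical.
def _unbatch_example():
  """Returns a dataset equivalent to `dataset_ops.Dataset.range(10)`."""
  batched = dataset_ops.Dataset.range(10).batch(4)  # batches of up to 4 ints
  return batched.apply(unbatch())  # scalar elements 0, 1, ..., 9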
class _DenseToSparseBatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s."""
def __init__(self, input_dataset, batch_size, row_shape):
"""See `Dataset.dense_to_sparse_batch()` for more details."""
if not isinstance(
dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType):
raise TypeError("DenseToSparseDataset requires an input whose elements "
"have a single component, whereas the input has %r." %
dataset_ops.get_legacy_output_types(input_dataset))
self._input_dataset = input_dataset
self._batch_size = batch_size
self._row_shape = row_shape
self._structure = structure.SparseTensorStructure(
dataset_ops.get_legacy_output_types(input_dataset),
tensor_shape.vector(None).concatenate(self._row_shape))
variant_tensor = ged_ops.experimental_dense_to_sparse_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._batch_size,
row_shape=convert.partial_shape_to_tensor(self._row_shape),
**dataset_ops.flat_structure(self))
super(_DenseToSparseBatchDataset, self).__init__(input_dataset,
variant_tensor)
@property
def _element_structure(self):
return self._structure
class _MapAndBatchDataset(dataset_ops.UnaryDataset):
"""A `Dataset` that maps a function over a batch of elements."""
def __init__(self, input_dataset, map_func, batch_size, num_parallel_calls,
drop_remainder, use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._map_func = dataset_ops.StructuredFunctionWrapper(
map_func,
"tf.data.experimental.map_and_batch()",
dataset=input_dataset,
use_legacy_function=use_legacy_function)
self._batch_size_t = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._num_parallel_calls_t = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
self._drop_remainder_t = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder_t)
if constant_drop_remainder:
# NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
# or `False` (explicitly retaining the remainder).
self._structure = self._map_func.output_structure._batch( # pylint: disable=protected-access
tensor_util.constant_value(self._batch_size_t))
else:
self._structure = self._map_func.output_structure._batch(None) # pylint: disable=protected-access
variant_tensor = ged_ops.experimental_map_and_batch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
batch_size=self._batch_size_t,
num_parallel_calls=self._num_parallel_calls_t,
drop_remainder=self._drop_remainder_t,
preserve_cardinality=True,
**dataset_ops.flat_structure(self))
super(_MapAndBatchDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def _element_structure(self):
return self._structure
|
tensorflow-master
|
tensorflow/python/data/experimental/ops/batching.py
|